author    James Falcon <therealfalcon@gmail.com>  2021-08-23 17:28:05 -0500
committer James Falcon <therealfalcon@gmail.com>  2021-08-23 17:28:05 -0500
commit    86afb49129170eccdf823c4ca1201c412a4a944a (patch)
tree      e524484b11b9ad10d7bc1b267738105ad151f89f
parent    ec60b1b129f85a6de39bc0e5e270dc174257859c (diff)
parent    6803368dec44c8b42196931b3a42d014a10b600d (diff)
merge from upstream/main at 21.3-1-g6803368d
-rw-r--r--  ChangeLog | 101
-rw-r--r--  HACKING.rst | 8
-rw-r--r--  README.md | 6
-rw-r--r--  bash_completion/cloud-init | 5
-rw-r--r--  cloudinit/cmd/devel/hotplug_hook.py | 279
-rw-r--r--  cloudinit/cmd/devel/logs.py | 24
-rwxr-xr-x  cloudinit/cmd/devel/net_convert.py | 14
-rw-r--r--  cloudinit/cmd/devel/parser.py | 3
-rw-r--r--  cloudinit/cmd/main.py | 32
-rw-r--r--  cloudinit/cmd/tests/test_main.py | 2
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 26
-rw-r--r--  cloudinit/config/cc_disk_setup.py | 13
-rw-r--r--  cloudinit/config/cc_growpart.py | 4
-rw-r--r--  cloudinit/config/cc_mounts.py | 17
-rw-r--r--  cloudinit/config/cc_ntp.py | 33
-rw-r--r--  cloudinit/config/cc_puppet.py | 159
-rw-r--r--  cloudinit/config/cc_resizefs.py | 5
-rw-r--r--  cloudinit/config/cc_resolv_conf.py | 28
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py | 6
-rw-r--r--  cloudinit/config/tests/test_resolv_conf.py | 28
-rwxr-xr-x  cloudinit/distros/__init__.py | 56
-rw-r--r--  cloudinit/distros/alpine.py | 21
-rw-r--r--  cloudinit/distros/arch.py | 19
-rw-r--r--  cloudinit/distros/bsd.py | 3
-rw-r--r--  cloudinit/distros/debian.py | 22
-rw-r--r--  cloudinit/distros/dragonflybsd.py | 12
-rw-r--r--  cloudinit/distros/eurolinux.py | 9
-rw-r--r--  cloudinit/distros/freebsd.py | 15
-rw-r--r--  cloudinit/distros/gentoo.py | 9
-rw-r--r--  cloudinit/distros/opensuse.py | 20
-rw-r--r--  cloudinit/distros/photon.py | 142
-rw-r--r--  cloudinit/distros/rhel.py | 18
-rw-r--r--  cloudinit/distros/rocky.py | 9
-rw-r--r--  cloudinit/distros/virtuozzo.py | 9
-rw-r--r--  cloudinit/event.py | 70
-rw-r--r--  cloudinit/handlers/jinja_template.py | 5
-rw-r--r--  cloudinit/log.py | 4
-rw-r--r--  cloudinit/net/__init__.py | 18
-rw-r--r--  cloudinit/net/activators.py | 279
-rw-r--r--  cloudinit/net/bsd.py | 15
-rw-r--r--  cloudinit/net/dhcp.py | 2
-rw-r--r--  cloudinit/net/freebsd.py | 22
-rw-r--r--  cloudinit/net/netbsd.py | 8
-rw-r--r--  cloudinit/net/netplan.py | 10
-rw-r--r--  cloudinit/net/network_state.py | 125
-rw-r--r--  cloudinit/net/networkd.py | 259
-rw-r--r--  cloudinit/net/openbsd.py | 9
-rw-r--r--  cloudinit/net/renderer.py | 2
-rw-r--r--  cloudinit/net/renderers.py | 17
-rw-r--r--  cloudinit/net/sysconfig.py | 12
-rw-r--r--  cloudinit/net/tests/test_dhcp.py | 6
-rw-r--r--  cloudinit/net/tests/test_network_state.py | 109
-rw-r--r--  cloudinit/patcher.py | 11
-rw-r--r--  cloudinit/reporting/events.py | 8
-rw-r--r--  cloudinit/settings.py | 1
-rwxr-xr-x  cloudinit/sources/DataSourceAzure.py | 329
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 10
-rw-r--r--  cloudinit/sources/DataSourceDigitalOcean.py | 2
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 17
-rw-r--r--  cloudinit/sources/DataSourceOVF.py | 49
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 11
-rw-r--r--  cloudinit/sources/DataSourceRbxCloud.py | 9
-rw-r--r--  cloudinit/sources/DataSourceScaleway.py | 10
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 8
-rw-r--r--  cloudinit/sources/DataSourceVMware.py | 871
-rw-r--r--  cloudinit/sources/__init__.py | 76
-rwxr-xr-x  cloudinit/sources/helpers/azure.py | 34
-rw-r--r--  cloudinit/sources/tests/test_init.py | 29
-rw-r--r--  cloudinit/ssh_util.py | 174
-rw-r--r--  cloudinit/stages.py | 125
-rw-r--r--  cloudinit/tests/helpers.py | 5
-rw-r--r--  cloudinit/tests/test_event.py | 26
-rw-r--r--  cloudinit/tests/test_stages.py | 122
-rw-r--r--  cloudinit/tests/test_url_helper.py | 4
-rw-r--r--  cloudinit/tests/test_util.py | 153
-rw-r--r--  cloudinit/util.py | 118
-rw-r--r--  cloudinit/version.py | 2
-rw-r--r--  config/cloud.cfg.tmpl | 79
-rw-r--r--  doc/examples/cloud-config-apt.txt | 6
-rw-r--r--  doc/examples/cloud-config-puppet.txt | 60
-rw-r--r--  doc/examples/cloud-config-user-groups.txt | 2
-rw-r--r--  doc/rtd/index.rst | 3
-rw-r--r--  doc/rtd/topics/availability.rst | 12
-rw-r--r--  doc/rtd/topics/cli.rst | 4
-rw-r--r--  doc/rtd/topics/code_review.rst | 8
-rw-r--r--  doc/rtd/topics/datasources.rst | 1
-rw-r--r--  doc/rtd/topics/datasources/digitalocean.rst | 4
-rw-r--r--  doc/rtd/topics/datasources/nocloud.rst | 2
-rw-r--r--  doc/rtd/topics/datasources/ovf.rst | 6
-rw-r--r--  doc/rtd/topics/datasources/vmware.rst | 359
-rw-r--r--  doc/rtd/topics/debugging.rst | 4
-rw-r--r--  doc/rtd/topics/events.rst | 95
-rw-r--r--  doc/rtd/topics/faq.rst | 4
-rw-r--r--  doc/rtd/topics/format.rst | 2
-rw-r--r--  doc/rtd/topics/instancedata.rst | 41
-rw-r--r--  doc/rtd/topics/network-config-format-v1.rst | 5
-rw-r--r--  doc/rtd/topics/network-config.rst | 7
-rw-r--r--  doc/rtd/topics/testing.rst | 4
-rw-r--r--  doc/sources/ovf/example/ovf-env.xml | 8
-rw-r--r--  integration-requirements.txt | 2
-rw-r--r--  packages/pkg-deps.json | 14
-rw-r--r--  packages/redhat/cloud-init.spec.in | 7
-rw-r--r--  requirements.txt | 9
-rwxr-xr-x  setup.py | 4
-rw-r--r-- [-rwxr-xr-x]  systemd/cloud-init-generator.tmpl | 3
-rw-r--r--  systemd/cloud-init-hotplugd.service | 22
-rw-r--r--  systemd/cloud-init-hotplugd.socket | 13
-rw-r--r--  systemd/cloud-init.service.tmpl | 5
-rwxr-xr-x  sysvinit/freebsd/cloudinit | 2
-rw-r--r--  templates/chrony.conf.photon.tmpl | 48
-rw-r--r--  templates/hosts.photon.tmpl | 22
-rw-r--r--  templates/ntp.conf.photon.tmpl | 61
-rw-r--r--  templates/resolv.conf.tmpl | 2
-rw-r--r--  templates/systemd.resolved.conf.tmpl | 15
-rw-r--r--  tests/cloud_tests/releases.yaml | 17
-rw-r--r--  tests/cloud_tests/testcases/examples/setup_run_puppet.yaml | 10
-rw-r--r--  tests/cloud_tests/util.py | 2
-rw-r--r--  tests/integration_tests/assets/keys/id_rsa.test1 | 38
-rw-r--r--  tests/integration_tests/assets/keys/id_rsa.test1.pub | 1
-rw-r--r--  tests/integration_tests/assets/keys/id_rsa.test2 | 38
-rw-r--r--  tests/integration_tests/assets/keys/id_rsa.test2.pub | 1
-rw-r--r--  tests/integration_tests/assets/keys/id_rsa.test3 | 38
-rw-r--r--  tests/integration_tests/assets/keys/id_rsa.test3.pub | 1
-rw-r--r--  tests/integration_tests/assets/test_version_change.pkl | bin 0 -> 21 bytes
-rw-r--r--  tests/integration_tests/assets/trusty_with_mime.pkl | 572
-rw-r--r--  tests/integration_tests/bugs/test_gh868.py | 20
-rw-r--r--  tests/integration_tests/clouds.py | 30
-rw-r--r--  tests/integration_tests/conftest.py | 8
-rw-r--r--  tests/integration_tests/integration_settings.py | 7
-rw-r--r--  tests/integration_tests/modules/test_combined.py | 195
-rw-r--r--  tests/integration_tests/modules/test_command_output.py | 23
-rw-r--r--  tests/integration_tests/modules/test_disk_setup.py | 192
-rw-r--r--  tests/integration_tests/modules/test_hotplug.py | 102
-rw-r--r--  tests/integration_tests/modules/test_ntp_servers.py | 89
-rw-r--r--  tests/integration_tests/modules/test_persistence.py | 30
-rw-r--r--  tests/integration_tests/modules/test_runcmd.py | 25
-rw-r--r--  tests/integration_tests/modules/test_snap.py | 2
-rw-r--r--  tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py | 2
-rw-r--r--  tests/integration_tests/modules/test_ssh_import_id.py | 6
-rw-r--r--  tests/integration_tests/modules/test_ssh_keysfile.py | 191
-rw-r--r--  tests/integration_tests/modules/test_user_events.py | 95
-rw-r--r--  tests/integration_tests/modules/test_version_change.py | 56
-rw-r--r--  tests/integration_tests/test_upgrade.py | 171
-rw-r--r--  tests/integration_tests/util.py | 47
-rw-r--r--  tests/unittests/cmd/devel/test_hotplug_hook.py | 218
-rw-r--r--  tests/unittests/test_builtin_handlers.py | 30
-rw-r--r--  tests/unittests/test_cli.py | 4
-rw-r--r--  tests/unittests/test_datasource/test_azure.py | 95
-rw-r--r--  tests/unittests/test_datasource/test_common.py | 3
-rw-r--r--  tests/unittests/test_datasource/test_ovf.py | 176
-rw-r--r--  tests/unittests/test_datasource/test_smartos.py | 10
-rw-r--r--  tests/unittests/test_datasource/test_vmware.py | 391
-rw-r--r--  tests/unittests/test_distros/test_create_users.py | 2
-rw-r--r--  tests/unittests/test_distros/test_dragonflybsd.py | 25
-rw-r--r--  tests/unittests/test_distros/test_netconfig.py | 140
-rw-r--r--  tests/unittests/test_distros/test_photon.py | 68
-rw-r--r--  tests/unittests/test_ds_identify.py | 279
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_source_v3.py | 23
-rw-r--r--  tests/unittests/test_handler/test_handler_mounts.py | 9
-rw-r--r--  tests/unittests/test_handler/test_handler_puppet.py | 236
-rw-r--r--  tests/unittests/test_handler/test_handler_resolv_conf.py | 105
-rw-r--r--  tests/unittests/test_handler/test_handler_set_hostname.py | 40
-rw-r--r--  tests/unittests/test_net.py | 251
-rw-r--r--  tests/unittests/test_net_activators.py | 255
-rw-r--r--  tests/unittests/test_net_freebsd.py | 63
-rw-r--r--  tests/unittests/test_render_cloudcfg.py | 5
-rw-r--r--  tests/unittests/test_reporting.py | 18
-rw-r--r--  tests/unittests/test_sshutil.py | 846
-rw-r--r--  tests/unittests/test_util.py | 18
-rw-r--r--  tools/.github-cla-signers | 12
-rwxr-xr-x  tools/build-on-netbsd | 23
-rwxr-xr-x  tools/ds-identify | 85
-rwxr-xr-x  tools/hook-hotplug | 26
-rwxr-xr-x  tools/read-dependencies | 12
-rwxr-xr-x  tools/render-cloudcfg | 4
-rwxr-xr-x  tools/run-container | 9
-rw-r--r--  tox.ini | 16
-rw-r--r--  udev/10-cloud-init-hook-hotplug.rules | 6
178 files changed, 9898 insertions(+), 892 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 98528249..6de07ad3 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,104 @@
+21.3
+ - Azure: During primary nic detection, check interface status continuously
+ before rebinding again (#990) [aswinrajamannar]
+ - Fix home permissions modified by ssh module (SC-338) (#984)
+ (LP: #1940233)
+ - Add integration test for sensitive jinja substitution (#986)
+ - Ignore hotplug socket when collecting logs (#985) (LP: #1940235)
+ - testing: Add missing mocks to test_vmware.py (#982)
+ - add Zadara Edge Cloud Platform to the supported clouds list (#963)
+ [sarahwzadara]
+ - testing: skip upgrade tests on LXD VMs (#980)
+ - Only invoke hotplug socket when functionality is enabled (#952)
+ - Revert unnecessary lcase in ds-identify (#978) [Andrew Kutz]
+ - cc_resolv_conf: fix typos (#969) [Shreenidhi Shedi]
+ - Replace broken httpretty tests with mock (SC-324) (#973)
+ - Azure: Check if interface is up after sleep when trying to bring it up
+ (#972) [aswinrajamannar]
+ - Update dscheck_VMware's rpctool check (#970) [Shreenidhi Shedi]
+ - Azure: Logging the detected interfaces (#968) [Moustafa Moustafa]
+ - Change netifaces dependency to 0.10.4 (#965) [Andrew Kutz]
+ - Azure: Limit polling network metadata on connection errors (#961)
+ [aswinrajamannar]
+ - Update inconsistent indentation (#962) [Andrew Kutz]
+ - cc_puppet: support AIO installations and more (#960) [Gabriel Nagy]
+ - Add Puppet contributors to CLA signers (#964) [Noah Fontes]
+ - Datasource for VMware (#953) [Andrew Kutz]
+ - photon: refactor hostname handling and add networkd activator (#958)
+ [sshedi]
+ - Stop copying ssh system keys and check folder permissions (#956)
+ [Emanuele Giuseppe Esposito]
+ - testing: port remaining cloud tests to integration testing framework
+ (SC-191) (#955)
+ - generate contents for ovf-env.xml when provisioning via IMDS (#959)
+ [Anh Vo]
+ - Add support for EuroLinux 7 && EuroLinux 8 (#957) [Aleksander Baranowski]
+ - Implementing device_aliases as described in docs (#945)
+ [Mal Graty] (LP: #1867532)
+ - testing: fix test_ssh_import_id.py (#954)
+ - Add ability to manage fallback network config on PhotonOS (#941) [sshedi]
+ - Add VZLinux support (#951) [eb3095]
+ - VMware: add network-config support in ovf-env.xml (#947) [PengpengSun]
+ - Update pylint to v2.9.3 and fix the new issues it spots (#946)
+ [Paride Legovini]
+ - Azure: mount default provisioning iso before try device listing (#870)
+ [Anh Vo]
+ - Document known hotplug limitations (#950)
+ - Initial hotplug support (#936)
+ - Fix MIME policy failure on python version upgrade (#934)
+ - run-container: fixup the centos repos baseurls when using http_proxy
+ (#944) [Paride Legovini]
+ - tools: add support for building rpms on rocky linux (#940)
+ - ssh-util: allow cloudinit to merge all ssh keys into a custom user
+ file, defined in AuthorizedKeysFile (#937) [Emanuele Giuseppe Esposito]
+ (LP: #1911680)
+ - VMware: new "allow_raw_data" switch (#939) [xiaofengw-vmware]
+ - bump pycloudlib version (#935)
+ - add renanrodrigo as a contributor (#938) [Renan Rodrigo]
+ - testing: simplify test_upgrade.py (#932)
+ - freebsd/net_v1 format: read MTU from root (#930) [Gonéri Le Bouder]
+ - Add new network activators to bring up interfaces (#919)
+ - Detect a Python version change and clear the cache (#857)
+ [Robert Schweikert]
+ - cloud_tests: fix the Impish release name (#931) [Paride Legovini]
+ - Removed distro specific network code from Photon (#929) [sshedi]
+ - Add support for VMware PhotonOS (#909) [sshedi]
+ - cloud_tests: add impish release definition (#927) [Paride Legovini]
+ - docs: fix stale links rename master branch to main (#926)
+ - Fix DNS in NetworkState (SC-133) (#923)
+ - tests: Add 'adhoc' mark for integration tests (#925)
+ - Fix the spelling of "DigitalOcean" (#924) [Mark Mercado]
+ - Small Doc Update for ReportEventStack and Test (#920) [Mike Russell]
+ - Replace deprecated collections.Iterable with abc replacement (#922)
+ (LP: #1932048)
+ - testing: OCI availability domain is now required (SC-59) (#910)
+ - add DragonFlyBSD support (#904) [Gonéri Le Bouder]
+ - Use instance-data-sensitive.json in jinja templates (SC-117) (#917)
+ (LP: #1931392)
+ - doc: Update NoCloud docs stating required files (#918) (LP: #1931577)
+ - build-on-netbsd: don't pin a specific py3 version (#913)
+ [Gonéri Le Bouder]
+ - Create the log file with 640 permissions (#858) [Robert Schweikert]
+ - Allow braces to appear in dhclient output (#911) [eb3095]
+ - Docs: Replace all freenode references with libera (#912)
+ - openbsd/net: flush the route table on net restart (#908)
+ [Gonéri Le Bouder]
+ - Add Rocky Linux support to cloud-init (#906) [Louis Abel]
+ - Add "esposem" as contributor (#907) [Emanuele Giuseppe Esposito]
+ - Add integration test for #868 (#901)
+ - Added support for importing keys via primary/security mirror clauses
+ (#882) [Paul Goins] (LP: #1925395)
+ - [examples] config-user-groups expire in the future (#902)
+ [Geert Stappers]
+ - BSD: static network, set the mtu (#894) [Gonéri Le Bouder]
+ - Add integration test for lp-1920939 (#891)
+ - Fix unit tests breaking from new httpretty version (#903)
+ - Allow user control over update events (#834)
+ - Update test characters in substitution unit test (#893)
+ - cc_disk_setup.py: remove UDEVADM_CMD definition as not used (#886)
+ [dermotbradley]
+ - Add AlmaLinux OS support (#872) [Andrew Lukoshko]
+
21.2
- Add \r\n check for SSH keys in Azure (#889)
- Revert "Add support to resize rootfs if using LVM (#721)" (#887)
diff --git a/HACKING.rst b/HACKING.rst
index 623b3136..fc858672 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -34,7 +34,7 @@ Follow these steps to submit your first pull request to cloud-init:
"Cloud-Init CLA"
* You also may contact user ``rick_h`` in the ``#cloud-init``
- channel on the Freenode IRC network.
+ channel on the Libera IRC network.
* Configure git with your email and name for commit messages.
@@ -55,7 +55,7 @@ Follow these steps to submit your first pull request to cloud-init:
git clone git://github.com/canonical/cloud-init
cd cloud-init
git remote add GH_USER git@github.com:GH_USER/cloud-init.git
- git push GH_USER master
+ git push GH_USER main
* Read through the cloud-init `Code Review Process`_, so you understand
how your changes will end up in cloud-init's codebase.
@@ -78,7 +78,7 @@ Follow these steps to submit your first pull request to cloud-init:
.. _repository: https://github.com/canonical/cloud-init
.. _contributor license agreement: https://ubuntu.com/legal/contributors
.. _contributor-agreement-canonical: https://launchpad.net/%7Econtributor-agreement-canonical/+members
-.. _tools/.github-cla-signers: https://github.com/canonical/cloud-init/blob/master/tools/.github-cla-signers
+.. _tools/.github-cla-signers: https://github.com/canonical/cloud-init/blob/main/tools/.github-cla-signers
.. _PR #344: https://github.com/canonical/cloud-init/pull/344
.. _PR #345: https://github.com/canonical/cloud-init/pull/345
@@ -159,7 +159,7 @@ Then, someone in the `Ubuntu Server`_ team will review your changes and
follow up in the pull request. Look at the `Code Review Process`_ doc
to understand the following steps.
-Feel free to ping and/or join ``#cloud-init`` on freenode irc if you
+Feel free to ping and/or join ``#cloud-init`` on Libera irc if you
have any questions.
.. _tox: https://tox.readthedocs.io/en/latest/
diff --git a/README.md b/README.md
index 01fd3b07..5828c2fa 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
# cloud-init
-[![Build Status](https://travis-ci.com/canonical/cloud-init.svg?branch=master)](https://travis-ci.com/canonical/cloud-init) [![Read the Docs](https://readthedocs.org/projects/cloudinit/badge/?version=latest&style=flat)](https://cloudinit.readthedocs.org)
+[![Build Status](https://travis-ci.com/canonical/cloud-init.svg?branch=main)](https://travis-ci.com/canonical/cloud-init) [![Read the Docs](https://readthedocs.org/projects/cloudinit/badge/?version=latest&style=flat)](https://cloudinit.readthedocs.org)
Cloud-init is the *industry standard* multi-distribution method for
cross-platform cloud instance initialization. It is supported across all
@@ -26,7 +26,7 @@ If you need support, start with the [user documentation](https://cloudinit.readt
If you need additional help consider reaching out with one of the following options:
-- Ask a question in the [``#cloud-init`` IRC channel on Freenode](https://webchat.freenode.net/?channel=#cloud-init)
+- Ask a question in the [``#cloud-init`` IRC channel on Libera](https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init)
- Search the cloud-init [mailing list archive](https://lists.launchpad.net/cloud-init/)
- Better yet, join the [cloud-init mailing list](https://launchpad.net/~cloud-init) and participate
- Find a bug? [Report bugs on Launchpad](https://bugs.launchpad.net/cloud-init/+filebug)
@@ -39,7 +39,7 @@ get in contact with that distribution and send them our way!
| Supported OSes | Supported Public Clouds | Supported Private Clouds |
| --- | --- | --- |
-| Alpine Linux<br />ArchLinux<br />Debian<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
+| Alpine Linux<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
## To start developing cloud-init
diff --git a/bash_completion/cloud-init b/bash_completion/cloud-init
index a9577e9d..b9f137b1 100644
--- a/bash_completion/cloud-init
+++ b/bash_completion/cloud-init
@@ -28,7 +28,7 @@ _cloudinit_complete()
COMPREPLY=($(compgen -W "--help --tarfile --include-userdata" -- $cur_word))
;;
devel)
- COMPREPLY=($(compgen -W "--help schema net-convert" -- $cur_word))
+ COMPREPLY=($(compgen -W "--help hotplug-hook schema net-convert" -- $cur_word))
;;
dhclient-hook)
COMPREPLY=($(compgen -W "--help up down" -- $cur_word))
@@ -64,6 +64,9 @@ _cloudinit_complete()
--frequency)
COMPREPLY=($(compgen -W "--help instance always once" -- $cur_word))
;;
+ hotplug-hook)
+ COMPREPLY=($(compgen -W "--help" -- $cur_word))
+ ;;
net-convert)
COMPREPLY=($(compgen -W "--help --network-data --kind --directory --output-kind" -- $cur_word))
;;
diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py
new file mode 100644
index 00000000..a0058f03
--- /dev/null
+++ b/cloudinit/cmd/devel/hotplug_hook.py
@@ -0,0 +1,279 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Handle reconfiguration on hotplug events"""
+import abc
+import argparse
+import os
+import sys
+import time
+
+from cloudinit import log
+from cloudinit import reporting
+from cloudinit.event import EventScope, EventType
+from cloudinit.net import activators, read_sys_net_safe
+from cloudinit.net.network_state import parse_net_config_data
+from cloudinit.reporting import events
+from cloudinit.stages import Init
+from cloudinit.sources import DataSource, DataSourceNotFoundException
+
+
+LOG = log.getLogger(__name__)
+NAME = 'hotplug-hook'
+
+
+def get_parser(parser=None):
+ """Build or extend an arg parser for hotplug-hook utility.
+
+ @param parser: Optional existing ArgumentParser instance representing the
+ subcommand which will be extended to support the args of this utility.
+
+ @returns: ArgumentParser with proper argument configuration.
+ """
+ if not parser:
+ parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
+
+ parser.description = __doc__
+ parser.add_argument(
+ "-s", "--subsystem", required=True,
+ help="subsystem to act on",
+ choices=['net']
+ )
+
+ subparsers = parser.add_subparsers(
+ title='Hotplug Action',
+ dest='hotplug_action'
+ )
+ subparsers.required = True
+
+ subparsers.add_parser(
+ 'query',
+ help='query if hotplug is enabled for given subsystem'
+ )
+
+ parser_handle = subparsers.add_parser(
+ 'handle', help='handle the hotplug event')
+ parser_handle.add_argument(
+ "-d", "--devpath", required=True,
+ metavar="PATH",
+ help="sysfs path to hotplugged device"
+ )
+ parser_handle.add_argument(
+ "-u", "--udevaction", required=True,
+ help="action to take",
+ choices=['add', 'remove']
+ )
+
+ return parser
+
+
+class UeventHandler(abc.ABC):
+ def __init__(self, id, datasource, devpath, action, success_fn):
+ self.id = id
+ self.datasource = datasource # type: DataSource
+ self.devpath = devpath
+ self.action = action
+ self.success_fn = success_fn
+
+ @abc.abstractmethod
+ def apply(self):
+ raise NotImplementedError()
+
+ @property
+ @abc.abstractmethod
+ def config(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def device_detected(self) -> bool:
+ raise NotImplementedError()
+
+ def detect_hotplugged_device(self):
+ detect_presence = None
+ if self.action == 'add':
+ detect_presence = True
+ elif self.action == 'remove':
+ detect_presence = False
+ else:
+ raise ValueError('Unknown action: %s' % self.action)
+
+ if detect_presence != self.device_detected():
+ raise RuntimeError(
+ 'Failed to detect %s in updated metadata' % self.id)
+
+ def success(self):
+ return self.success_fn()
+
+ def update_metadata(self):
+ result = self.datasource.update_metadata_if_supported([
+ EventType.HOTPLUG])
+ if not result:
+ raise RuntimeError(
+ 'Datasource %s not updated for '
+ 'event %s' % (self.datasource, EventType.HOTPLUG)
+ )
+ return result
+
+
+class NetHandler(UeventHandler):
+ def __init__(self, datasource, devpath, action, success_fn):
+ # convert devpath to mac address
+ id = read_sys_net_safe(os.path.basename(devpath), 'address')
+ super().__init__(id, datasource, devpath, action, success_fn)
+
+ def apply(self):
+ self.datasource.distro.apply_network_config(
+ self.config,
+ bring_up=False,
+ )
+ interface_name = os.path.basename(self.devpath)
+ activator = activators.select_activator()
+ if self.action == 'add':
+ if not activator.bring_up_interface(interface_name):
+ raise RuntimeError(
+ 'Failed to bring up device: {}'.format(self.devpath))
+ elif self.action == 'remove':
+ if not activator.bring_down_interface(interface_name):
+ raise RuntimeError(
+ 'Failed to bring down device: {}'.format(self.devpath))
+
+ @property
+ def config(self):
+ return self.datasource.network_config
+
+ def device_detected(self) -> bool:
+ netstate = parse_net_config_data(self.config)
+ found = [
+ iface for iface in netstate.iter_interfaces()
+ if iface.get('mac_address') == self.id
+ ]
+ LOG.debug('Ifaces with ID=%s : %s', self.id, found)
+ return len(found) > 0
+
+
+SUBSYSTEM_PROPERTES_MAP = {
+ 'net': (NetHandler, EventScope.NETWORK),
+}
+
+
+def is_enabled(hotplug_init, subsystem):
+ try:
+ scope = SUBSYSTEM_PROPERTES_MAP[subsystem][1]
+ except KeyError as e:
+ raise Exception(
+ 'hotplug-hook: cannot handle events for subsystem: {}'.format(
+ subsystem)
+ ) from e
+
+ return hotplug_init.update_event_enabled(
+ event_source_type=EventType.HOTPLUG,
+ scope=scope
+ )
+
+
+def initialize_datasource(hotplug_init, subsystem):
+ LOG.debug('Fetching datasource')
+ datasource = hotplug_init.fetch(existing="trust")
+
+ if not datasource.get_supported_events([EventType.HOTPLUG]):
+ LOG.debug('hotplug not supported for event of type %s', subsystem)
+ return
+
+ if not is_enabled(hotplug_init, subsystem):
+ LOG.debug('hotplug not enabled for event of type %s', subsystem)
+ return
+ return datasource
+
+
+def handle_hotplug(
+ hotplug_init: Init, devpath, subsystem, udevaction
+):
+ datasource = initialize_datasource(hotplug_init, subsystem)
+ if not datasource:
+ return
+ handler_cls = SUBSYSTEM_PROPERTES_MAP[subsystem][0]
+ LOG.debug('Creating %s event handler', subsystem)
+ event_handler = handler_cls(
+ datasource=datasource,
+ devpath=devpath,
+ action=udevaction,
+ success_fn=hotplug_init._write_to_cache
+ ) # type: UeventHandler
+ wait_times = [1, 3, 5, 10, 30]
+ for attempt, wait in enumerate(wait_times):
+ LOG.debug(
+ 'subsystem=%s update attempt %s/%s',
+ subsystem,
+ attempt,
+ len(wait_times)
+ )
+ try:
+ LOG.debug('Refreshing metadata')
+ event_handler.update_metadata()
+ LOG.debug('Detecting device in updated metadata')
+ event_handler.detect_hotplugged_device()
+ LOG.debug('Applying config change')
+ event_handler.apply()
+ LOG.debug('Updating cache')
+ event_handler.success()
+ break
+ except Exception as e:
+ LOG.debug('Exception while processing hotplug event. %s', e)
+ time.sleep(wait)
+ last_exception = e
+ else:
+ raise last_exception # type: ignore
+
+
+def handle_args(name, args):
+ # Note that if an exception happens between now and when logging is
+ # setup, we'll only see it in the journal
+ hotplug_reporter = events.ReportEventStack(
+ name, __doc__, reporting_enabled=True
+ )
+
+ hotplug_init = Init(ds_deps=[], reporter=hotplug_reporter)
+ hotplug_init.read_cfg()
+
+ log.setupLogging(hotplug_init.cfg)
+ if 'reporting' in hotplug_init.cfg:
+ reporting.update_configuration(hotplug_init.cfg.get('reporting'))
+ # Logging isn't going to be setup until now
+ LOG.debug(
+ '%s called with the following arguments: {'
+ 'hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}',
+ name,
+ args.hotplug_action,
+ args.subsystem,
+ args.udevaction if 'udevaction' in args else None,
+ args.devpath if 'devpath' in args else None,
+ )
+
+ with hotplug_reporter:
+ try:
+ if args.hotplug_action == 'query':
+ try:
+ datasource = initialize_datasource(
+ hotplug_init, args.subsystem)
+ except DataSourceNotFoundException:
+ print(
+ "Unable to determine hotplug state. No datasource "
+ "detected")
+ sys.exit(1)
+ print('enabled' if datasource else 'disabled')
+ else:
+ handle_hotplug(
+ hotplug_init=hotplug_init,
+ devpath=args.devpath,
+ subsystem=args.subsystem,
+ udevaction=args.udevaction,
+ )
+ except Exception:
+ LOG.exception('Received fatal exception handling hotplug!')
+ raise
+
+ LOG.debug('Exiting hotplug handler')
+ reporting.flush_events()
+
+
+if __name__ == '__main__':
+ args = get_parser().parse_args()
+ handle_args(NAME, args)
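
For reference, a minimal sketch of how a udev "add" event for a NIC would map
onto the parser above (the devpath is a hypothetical example; actually driving
the event end to end requires a booted instance with a cached datasource):

    from cloudinit.cmd.devel import hotplug_hook

    parser = hotplug_hook.get_parser()
    # Mirrors the arguments a udev-driven caller would pass for a hotplugged
    # NIC: subsystem on the top-level parser, then the 'handle' subcommand.
    args = parser.parse_args([
        '--subsystem', 'net',
        'handle',
        '--devpath', '/sys/devices/virtual/net/eth1',
        '--udevaction', 'add',
    ])
    # On a real instance, this would refresh metadata, detect the device,
    # and apply/bring up the new network config:
    # hotplug_hook.handle_args(hotplug_hook.NAME, args)
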
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
index 51c61cca..31ade73d 100644
--- a/cloudinit/cmd/devel/logs.py
+++ b/cloudinit/cmd/devel/logs.py
@@ -48,11 +48,15 @@ def get_parser(parser=None):
return parser
-def _copytree_ignore_sensitive_files(curdir, files):
- """Return a list of files to ignore if we are non-root"""
- if os.getuid() == 0:
- return ()
- return (INSTANCE_JSON_SENSITIVE_FILE,) # Ignore root-permissioned files
+def _copytree_rundir_ignore_files(curdir, files):
+ """Return a list of files to ignore for /run/cloud-init directory"""
+ ignored_files = [
+ 'hook-hotplug-cmd', # named pipe for hotplug
+ ]
+ if os.getuid() != 0:
+ # Ignore root-permissioned files
+ ignored_files.append(INSTANCE_JSON_SENSITIVE_FILE)
+ return ignored_files
def _write_command_output_to_file(cmd, filename, msg, verbosity):
@@ -123,9 +127,13 @@ def collect_logs(tarfile, include_userdata, verbosity=0):
run_dir = os.path.join(log_dir, 'run')
ensure_dir(run_dir)
if os.path.exists(CLOUDINIT_RUN_DIR):
- shutil.copytree(CLOUDINIT_RUN_DIR,
- os.path.join(run_dir, 'cloud-init'),
- ignore=_copytree_ignore_sensitive_files)
+ try:
+ shutil.copytree(CLOUDINIT_RUN_DIR,
+ os.path.join(run_dir, 'cloud-init'),
+ ignore=_copytree_rundir_ignore_files)
+ except shutil.Error as e:
+ sys.stderr.write("Failed collecting file(s) due to error:\n")
+ sys.stderr.write(str(e) + '\n')
_debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity)
else:
_debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1,
diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py
index 0668ffa3..f4a98e5e 100755
--- a/cloudinit/cmd/devel/net_convert.py
+++ b/cloudinit/cmd/devel/net_convert.py
@@ -11,7 +11,7 @@ from cloudinit.sources import DataSourceAzure as azure
from cloudinit.sources import DataSourceOVF as ovf
from cloudinit import distros, safeyaml
-from cloudinit.net import eni, netplan, network_state, sysconfig
+from cloudinit.net import eni, netplan, networkd, network_state, sysconfig
from cloudinit import log
NAME = 'net-convert'
@@ -51,7 +51,7 @@ def get_parser(parser=None):
parser.add_argument("--debug", action='store_true',
help='enable debug logging to stderr.')
parser.add_argument("-O", "--output-kind",
- choices=['eni', 'netplan', 'sysconfig'],
+ choices=['eni', 'netplan', 'networkd', 'sysconfig'],
required=True,
help="The network config format to emit")
return parser
@@ -96,9 +96,6 @@ def handle_args(name, args):
pre_ns = ovf.get_network_config_from_conf(config, False)
ns = network_state.parse_net_config_data(pre_ns)
- if not ns:
- raise RuntimeError("No valid network_state object created from"
- " input data")
if args.debug:
sys.stderr.write('\n'.join(
@@ -118,9 +115,14 @@ def handle_args(name, args):
config['netplan_path'] = config['netplan_path'][1:]
# enable some netplan features
config['features'] = ['dhcp-use-domains', 'ipv6-mtu']
- else:
+ elif args.output_kind == "networkd":
+ r_cls = networkd.Renderer
+ config = distro.renderer_configs.get('networkd')
+ elif args.output_kind == "sysconfig":
r_cls = sysconfig.Renderer
config = distro.renderer_configs.get('sysconfig')
+ else:
+ raise RuntimeError("Invalid output_kind")
r = r_cls(config=config)
sys.stderr.write(''.join([
diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
index 1a3c46a4..be304630 100644
--- a/cloudinit/cmd/devel/parser.py
+++ b/cloudinit/cmd/devel/parser.py
@@ -7,6 +7,7 @@
import argparse
from cloudinit.config import schema
+from . import hotplug_hook
from . import net_convert
from . import render
from . import make_mime
@@ -21,6 +22,8 @@ def get_parser(parser=None):
subparsers.required = True
subcmds = [
+ (hotplug_hook.NAME, hotplug_hook.__doc__,
+ hotplug_hook.get_parser, hotplug_hook.handle_args),
('schema', 'Validate cloud-config files for document schema',
schema.get_parser, schema.handle_schema_args),
(net_convert.NAME, net_convert.__doc__,
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index baf1381f..1de1de99 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -19,7 +19,7 @@ import time
import traceback
from cloudinit import patcher
-patcher.patch() # noqa
+patcher.patch_logging()
from cloudinit import log as logging
from cloudinit import netinfo
@@ -210,6 +210,35 @@ def attempt_cmdline_url(path, network=True, cmdline=None):
(cmdline_name, url, path))
+def purge_cache_on_python_version_change(init):
+ """Purge the cache if python version changed on us.
+
+ There could be changes not represented in our cache (obj.pkl) after we
+ upgrade to a new version of python, so at that point clear the cache
+ """
+ current_python_version = '%d.%d' % (
+ sys.version_info.major, sys.version_info.minor
+ )
+ python_version_path = os.path.join(
+ init.paths.get_cpath('data'), 'python-version'
+ )
+ if os.path.exists(python_version_path):
+ cached_python_version = open(python_version_path).read()
+ # The Python version has changed out from under us, anything that was
+ # pickled previously is likely useless due to API changes.
+ if cached_python_version != current_python_version:
+ LOG.debug('Python version change detected. Purging cache')
+ init.purge_cache(True)
+ util.write_file(python_version_path, current_python_version)
+ else:
+ if os.path.exists(init.paths.get_ipath_cur('obj_pkl')):
+ LOG.info(
+ 'Writing python-version file. '
+ 'Cache compatibility status is currently unknown.'
+ )
+ util.write_file(python_version_path, current_python_version)
+
+
def main_init(name, args):
deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
if args.local:
@@ -276,6 +305,7 @@ def main_init(name, args):
util.logexc(LOG, "Failed to initialize, likely bad things to come!")
# Stage 4
path_helper = init.paths
+ purge_cache_on_python_version_change(init)
mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK
if mode == sources.DSMODE_NETWORK:
diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
index 78b27441..1f5975b0 100644
--- a/cloudinit/cmd/tests/test_main.py
+++ b/cloudinit/cmd/tests/test_main.py
@@ -17,6 +17,8 @@ myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand')
class TestMain(FilesystemMockingTestCase):
+ with_logs = True
+ allowed_subp = False
def setUp(self):
super(TestMain, self).setUp()
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index bb8a1278..0c9c7925 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -57,6 +57,15 @@ mirror_property = {
},
'search_dns': {
'type': 'boolean',
+ },
+ 'keyid': {
+ 'type': 'string'
+ },
+ 'key': {
+ 'type': 'string'
+ },
+ 'keyserver': {
+ 'type': 'string'
}
}
}
@@ -228,6 +237,15 @@ schema = {
key, the search pattern will be
``<distro>-security-mirror``.
+ Each mirror may also specify a key to import via
+ any of the following optional keys:
+
+ - ``keyid``: a key to import via shortid or \
+ fingerprint.
+ - ``key``: a raw PGP key.
+ - ``keyserver``: alternate keyserver to pull \
+ ``keyid`` key from.
+
If no mirrors are specified, or all lookups fail,
then default mirrors defined in the datasource
are used. If none are present in the datasource
@@ -453,6 +471,7 @@ def apply_apt(cfg, cloud, target):
LOG.debug("Apt Mirror info: %s", mirrors)
if util.is_false(cfg.get('preserve_sources_list', False)):
+ add_mirror_keys(cfg, target)
generate_sources_list(cfg, release, mirrors, cloud)
rename_apt_lists(mirrors, target, arch)
@@ -660,6 +679,13 @@ def disable_suites(disabled, src, release):
return retsrc
+def add_mirror_keys(cfg, target):
+ """Adds any keys included in the primary/security mirror clauses"""
+ for key in ('primary', 'security'):
+ for mirror in cfg.get(key, []):
+ add_apt_key(mirror, target)
+
+
def generate_sources_list(cfg, release, mirrors, cloud):
"""generate_sources_list
create a source.list file based on a custom or default template
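
As a rough illustration, the apt config that add_mirror_keys() walks would
look like the following; the mirror URI and key id are placeholders, not
values from this commit:

    apt_cfg = {
        'primary': [{
            'arches': ['amd64'],
            'uri': 'http://mirror.example.com/ubuntu',
            'keyid': 'F430BBA5',   # shortid or full fingerprint to import
        }],
        'security': [{
            'arches': ['default'],
            'search_dns': True,
        }],
    }
    # add_mirror_keys(apt_cfg, target) imports 'keyid' (or a raw 'key',
    # optionally fetched from an alternate 'keyserver') per mirror entry.
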
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 22af3813..3ec49ca5 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -125,9 +125,15 @@ def handle(_name, cfg, cloud, log, _args):
See doc/examples/cloud-config-disk-setup.txt for documentation on the
format.
"""
+ device_aliases = cfg.get("device_aliases", {})
+
+ def alias_to_device(cand):
+ name = device_aliases.get(cand)
+ return cloud.device_name_to_device(name or cand) or name
+
disk_setup = cfg.get("disk_setup")
if isinstance(disk_setup, dict):
- update_disk_setup_devices(disk_setup, cloud.device_name_to_device)
+ update_disk_setup_devices(disk_setup, alias_to_device)
log.debug("Partitioning disks: %s", str(disk_setup))
for disk, definition in disk_setup.items():
if not isinstance(definition, dict):
@@ -145,7 +151,7 @@ def handle(_name, cfg, cloud, log, _args):
fs_setup = cfg.get("fs_setup")
if isinstance(fs_setup, list):
log.debug("setting up filesystems: %s", str(fs_setup))
- update_fs_setup_devices(fs_setup, cloud.device_name_to_device)
+ update_fs_setup_devices(fs_setup, alias_to_device)
for definition in fs_setup:
if not isinstance(definition, dict):
log.warning("Invalid file system definition: %s" % definition)
@@ -174,7 +180,8 @@ def update_disk_setup_devices(disk_setup, tformer):
del disk_setup[transformed]
disk_setup[transformed] = disk_setup[origname]
- disk_setup[transformed]['_origname'] = origname
+ if isinstance(disk_setup[transformed], dict):
+ disk_setup[transformed]['_origname'] = origname
del disk_setup[origname]
LOG.debug("updated disk_setup device entry '%s' to '%s'",
origname, transformed)
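
A sketch of the cfg dict this handler now accepts, with a user-defined alias
resolved through alias_to_device(); the alias name is hypothetical:

    cfg = {
        'device_aliases': {'data_disk': 'ephemeral0'},
        'disk_setup': {
            # 'data_disk' resolves to 'ephemeral0', which the cloud then
            # maps to a real block device via device_name_to_device().
            'data_disk': {'table_type': 'gpt', 'layout': True,
                          'overwrite': False},
        },
        'fs_setup': [
            {'device': 'data_disk.1', 'filesystem': 'ext4',
             'label': 'data'},
        ],
    }
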
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 9f338ad1..9f5525a1 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -224,6 +224,10 @@ def device_part_info(devpath):
freebsd_part = "/dev/" + util.find_freebsd_part(devpath)
m = re.search('^(/dev/.+)p([0-9])$', freebsd_part)
return (m.group(1), m.group(2))
+ elif util.is_DragonFlyBSD():
+ dragonflybsd_part = "/dev/" + util.find_dragonflybsd_part(devpath)
+ m = re.search('^(/dev/.+)s([0-9])$', dragonflybsd_part)
+ return (m.group(1), m.group(2))
if not os.path.exists(syspath):
raise ValueError("%s had no syspath (%s)" % (devpath, syspath))
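
The DragonFlyBSD branch splits a device path on its trailing slice suffix
's<digit>'; a tiny check with a hypothetical slice name:

    import re

    # Mirrors the regex added above for DragonFlyBSD partition parsing.
    m = re.search(r'^(/dev/.+)s([0-9])$', '/dev/vbd0s1')
    assert (m.group(1), m.group(2)) == ('/dev/vbd0', '1')
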
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index c22d1698..eeb008d2 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -123,7 +123,7 @@ def _is_block_device(device_path, partition_path=None):
return os.path.exists(sys_path)
-def sanitize_devname(startname, transformer, log):
+def sanitize_devname(startname, transformer, log, aliases=None):
log.debug("Attempting to determine the real name of %s", startname)
# workaround, allow user to specify 'ephemeral'
@@ -137,9 +137,14 @@ def sanitize_devname(startname, transformer, log):
return startname
device_path, partition_number = util.expand_dotted_devname(devname)
+ orig = device_path
+
+ if aliases:
+ device_path = aliases.get(device_path, device_path)
+ if orig != device_path:
+ log.debug("Mapped device alias %s to %s", orig, device_path)
if is_meta_device_name(device_path):
- orig = device_path
device_path = transformer(device_path)
if not device_path:
return None
@@ -394,6 +399,8 @@ def handle(_name, cfg, cloud, log, _args):
fstab_devs[toks[0]] = line
fstab_lines.append(line)
+ device_aliases = cfg.get("device_aliases", {})
+
for i in range(len(cfgmnt)):
# skip something that wasn't a list
if not isinstance(cfgmnt[i], list):
@@ -402,7 +409,8 @@ def handle(_name, cfg, cloud, log, _args):
continue
start = str(cfgmnt[i][0])
- sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
+ sanitized = sanitize_devname(start, cloud.device_name_to_device, log,
+ aliases=device_aliases)
if sanitized != start:
log.debug("changed %s => %s" % (start, sanitized))
@@ -444,7 +452,8 @@ def handle(_name, cfg, cloud, log, _args):
# entry has the same device name
for defmnt in defmnts:
start = defmnt[0]
- sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
+ sanitized = sanitize_devname(start, cloud.device_name_to_device, log,
+ aliases=device_aliases)
if sanitized != start:
log.debug("changed default device %s => %s" % (start, sanitized))
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 41c278ff..7c371a49 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -24,8 +24,9 @@ LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
NTP_CONF = '/etc/ntp.conf'
NR_POOL_SERVERS = 4
-distros = ['almalinux', 'alpine', 'centos', 'debian', 'fedora', 'opensuse',
- 'rhel', 'sles', 'ubuntu']
+distros = ['almalinux', 'alpine', 'centos', 'debian', 'eurolinux', 'fedora',
+ 'opensuse', 'photon', 'rhel', 'rocky', 'sles', 'ubuntu',
+ 'virtuozzo']
NTP_CLIENT_CONFIG = {
'chrony': {
@@ -80,24 +81,37 @@ DISTRO_CLIENT_CONFIG = {
'confpath': '/etc/chrony/chrony.conf',
},
},
- 'rhel': {
+ 'opensuse': {
+ 'chrony': {
+ 'service_name': 'chronyd',
+ },
'ntp': {
+ 'confpath': '/etc/ntp.conf',
'service_name': 'ntpd',
},
- 'chrony': {
- 'service_name': 'chronyd',
+ 'systemd-timesyncd': {
+ 'check_exe': '/usr/lib/systemd/systemd-timesyncd',
},
},
- 'opensuse': {
+ 'photon': {
'chrony': {
'service_name': 'chronyd',
},
'ntp': {
- 'confpath': '/etc/ntp.conf',
'service_name': 'ntpd',
+ 'confpath': '/etc/ntp.conf'
},
'systemd-timesyncd': {
'check_exe': '/usr/lib/systemd/systemd-timesyncd',
+ 'confpath': '/etc/systemd/timesyncd.conf',
+ },
+ },
+ 'rhel': {
+ 'ntp': {
+ 'service_name': 'ntpd',
+ },
+ 'chrony': {
+ 'service_name': 'chronyd',
},
},
'sles': {
@@ -392,9 +406,9 @@ def generate_server_names(distro):
# For legal reasons x.pool.sles.ntp.org does not exist,
# use the opensuse pool
pool_distro = 'opensuse'
- elif distro == 'alpine':
+ elif distro == 'alpine' or distro == 'eurolinux':
# Alpine-specific pool (i.e. x.alpine.pool.ntp.org) does not exist
- # so use general x.pool.ntp.org instead.
+ # so use general x.pool.ntp.org instead. The same applies to EuroLinux
pool_distro = ''
for x in range(0, NR_POOL_SERVERS):
@@ -551,7 +565,6 @@ def handle(name, cfg, cloud, log, _args):
# Select which client is going to be used and get the configuration
ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'),
cloud.distro)
-
# Allow user ntp config to override distro configurations
ntp_client_config = util.mergemanydict(
[ntp_client_config, ntp_cfg.get('config', {})], reverse=True)
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index bc981cf4..a0779eb0 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -29,22 +29,41 @@ The keys are ``package_name``, ``conf_file``, ``ssl_dir`` and
ones that work with puppet 3.x and with distributions that ship modified
puppet 4.x that uses the old paths.
+Agent packages from the puppetlabs repositories can be installed by setting
+``install_type`` to ``aio``. Based on this setting, the default config/SSL/CSR
+paths will be adjusted accordingly. To maintain backwards compatibility this
+setting defaults to ``packages`` which will install puppet from the distro
+packages.
+
+If installing ``aio`` packages, ``collection`` can also be set to one of
+``puppet`` (rolling release), ``puppet6``, ``puppet7`` (or their nightly
+counterparts) in order to install specific release streams. By default, the
+puppetlabs repository will be purged after installation finishes; set
+``cleanup`` to ``false`` to prevent this. AIO packages are installed through a
+shell script which is downloaded on the machine and then executed; the path to
+this script can be overridden using the ``aio_install_url`` key.
+
Puppet configuration can be specified under the ``conf`` key. The
configuration is specified as a dictionary containing high-level ``<section>``
keys and lists of ``<key>=<value>`` pairs within each section. Each section
name and ``<key>=<value>`` pair is written directly to ``puppet.conf``. As
-such, section names should be one of: ``main``, ``master``, ``agent`` or
+such, section names should be one of: ``main``, ``server``, ``agent`` or
``user`` and keys should be valid puppet configuration options. The
``certname`` key supports string substitutions for ``%i`` and ``%f``,
corresponding to the instance id and fqdn of the machine respectively.
If ``ca_cert`` is present, it will not be written to ``puppet.conf``, but
-instead will be used as the puppermaster certificate. It should be specified
+instead will be used as the puppetserver certificate. It should be specified
in pem format as a multi-line string (using the ``|`` yaml notation).
-Additionally it's possible to create a csr_attributes.yaml for
-CSR attributes and certificate extension requests.
+Additionally it's possible to create a ``csr_attributes.yaml`` file for CSR
+attributes and certificate extension requests.
See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html
+The puppet service will be automatically enabled after installation. A manual
+run can also be triggered by setting ``exec`` to ``true``, and additional
+arguments can be passed to ``puppet agent`` via the ``exec_args`` key (by
+default the agent will execute with the ``--test`` flag).
+
**Internal name:** ``cc_puppet``
**Module frequency:** per instance
@@ -56,13 +75,19 @@ See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html
puppet:
install: <true/false>
version: <version>
+ collection: <aio collection>
+ install_type: <packages/aio>
+ aio_install_url: 'https://git.io/JBhoQ'
+ cleanup: <true/false>
conf_file: '/etc/puppet/puppet.conf'
ssl_dir: '/var/lib/puppet/ssl'
csr_attributes_path: '/etc/puppet/csr_attributes.yaml'
package_name: 'puppet'
+ exec: <true/false>
+ exec_args: ['--test']
conf:
agent:
- server: "puppetmaster.example.org"
+ server: "puppetserver.example.org"
certname: "%i.%f"
ca_cert: |
-------BEGIN CERTIFICATE-------
@@ -84,12 +109,12 @@ from io import StringIO
from cloudinit import helpers
from cloudinit import subp
+from cloudinit import temp_utils
from cloudinit import util
+from cloudinit import url_helper
-PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
-PUPPET_SSL_DIR = '/var/lib/puppet/ssl'
-PUPPET_CSR_ATTRIBUTES_PATH = '/etc/puppet/csr_attributes.yaml'
-PUPPET_PACKAGE_NAME = 'puppet'
+AIO_INSTALL_URL = 'https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh' # noqa: E501
+PUPPET_AGENT_DEFAULT_ARGS = ['--test']
class PuppetConstants(object):
@@ -119,6 +144,43 @@ def _autostart_puppet(log):
" puppet services on this system"))
+def get_config_value(puppet_bin, setting):
+ """Get the config value for a given setting using `puppet config print`
+ :param puppet_bin: path to puppet binary
+ :param setting: setting to query
+ """
+ out, _ = subp.subp([puppet_bin, 'config', 'print', setting])
+ return out.rstrip()
+
+
+def install_puppet_aio(url=AIO_INSTALL_URL, version=None,
+ collection=None, cleanup=True):
+ """Install puppet-agent from the puppetlabs repositories using the one-shot
+ shell script
+
+ :param url: URL from where to download the install script
+ :param version: version to install, blank defaults to latest
+ :param collection: collection to install, blank defaults to latest
+ :param cleanup: whether to purge the puppetlabs repo after installation
+ """
+ args = []
+ if version is not None:
+ args = ['-v', version]
+ if collection is not None:
+ args += ['-c', collection]
+
+ # Purge puppetlabs repos after installation
+ if cleanup:
+ args += ['--cleanup']
+ content = url_helper.readurl(url=url, retries=5).contents
+
+ # Use tmpdir over tmpfile to avoid 'text file busy' on execute
+ with temp_utils.tempdir(needs_exe=True) as tmpd:
+ tmpf = os.path.join(tmpd, 'puppet-install')
+ util.write_file(tmpf, content, mode=0o700)
+ return subp.subp([tmpf] + args, capture=False)
+
+
def handle(name, cfg, cloud, log, _args):
# If there isn't a puppet key in the configuration don't do anything
if 'puppet' not in cfg:
@@ -130,23 +192,50 @@ def handle(name, cfg, cloud, log, _args):
# Start by installing the puppet package if necessary...
install = util.get_cfg_option_bool(puppet_cfg, 'install', True)
version = util.get_cfg_option_str(puppet_cfg, 'version', None)
- package_name = util.get_cfg_option_str(
- puppet_cfg, 'package_name', PUPPET_PACKAGE_NAME)
- conf_file = util.get_cfg_option_str(
- puppet_cfg, 'conf_file', PUPPET_CONF_PATH)
- ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir', PUPPET_SSL_DIR)
- csr_attributes_path = util.get_cfg_option_str(
- puppet_cfg, 'csr_attributes_path', PUPPET_CSR_ATTRIBUTES_PATH)
+ collection = util.get_cfg_option_str(puppet_cfg, 'collection', None)
+ install_type = util.get_cfg_option_str(
+ puppet_cfg, 'install_type', 'packages')
+ cleanup = util.get_cfg_option_bool(puppet_cfg, 'cleanup', True)
+ run = util.get_cfg_option_bool(puppet_cfg, 'exec', default=False)
+ aio_install_url = util.get_cfg_option_str(
+ puppet_cfg, 'aio_install_url', default=AIO_INSTALL_URL)
- p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log)
+ # AIO and distro packages use different paths
+ if install_type == 'aio':
+ puppet_user = 'root'
+ puppet_bin = '/opt/puppetlabs/bin/puppet'
+ puppet_package = 'puppet-agent'
+ else: # default to 'packages'
+ puppet_user = 'puppet'
+ puppet_bin = 'puppet'
+ puppet_package = 'puppet'
+
+ package_name = util.get_cfg_option_str(
+ puppet_cfg, 'package_name', puppet_package)
if not install and version:
- log.warning(("Puppet install set false but version supplied,"
+ log.warning(("Puppet install set to false but version supplied,"
" doing nothing."))
elif install:
- log.debug(("Attempting to install puppet %s,"),
- version if version else 'latest')
+ log.debug(("Attempting to install puppet %s from %s"),
+ version if version else 'latest', install_type)
- cloud.distro.install_packages((package_name, version))
+ if install_type == "packages":
+ cloud.distro.install_packages((package_name, version))
+ elif install_type == "aio":
+ install_puppet_aio(aio_install_url, version, collection, cleanup)
+ else:
+ log.warning("Unknown puppet install type '%s'", install_type)
+ run = False
+
+ conf_file = util.get_cfg_option_str(
+ puppet_cfg, 'conf_file', get_config_value(puppet_bin, 'config'))
+ ssl_dir = util.get_cfg_option_str(
+ puppet_cfg, 'ssl_dir', get_config_value(puppet_bin, 'ssldir'))
+ csr_attributes_path = util.get_cfg_option_str(
+ puppet_cfg, 'csr_attributes_path',
+ get_config_value(puppet_bin, 'csr_attributes'))
+
+ p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log)
# ... and then update the puppet configuration
if 'conf' in puppet_cfg:
@@ -165,17 +254,18 @@ def handle(name, cfg, cloud, log, _args):
source=p_constants.conf_path)
for (cfg_name, cfg) in puppet_cfg['conf'].items():
# Cert configuration is a special case
- # Dump the puppet master ca certificate in the correct place
+ # Dump the puppetserver ca certificate in the correct place
if cfg_name == 'ca_cert':
# Puppet ssl sub-directory isn't created yet
# Create it with the proper permissions and ownership
util.ensure_dir(p_constants.ssl_dir, 0o771)
- util.chownbyname(p_constants.ssl_dir, 'puppet', 'root')
+ util.chownbyname(p_constants.ssl_dir, puppet_user, 'root')
util.ensure_dir(p_constants.ssl_cert_dir)
- util.chownbyname(p_constants.ssl_cert_dir, 'puppet', 'root')
+ util.chownbyname(p_constants.ssl_cert_dir, puppet_user, 'root')
util.write_file(p_constants.ssl_cert_path, cfg)
- util.chownbyname(p_constants.ssl_cert_path, 'puppet', 'root')
+ util.chownbyname(p_constants.ssl_cert_path,
+ puppet_user, 'root')
else:
# Iterate through the config items, we'll use ConfigParser.set
# to overwrite or create new items as needed
@@ -203,6 +293,25 @@ def handle(name, cfg, cloud, log, _args):
# Set it up so it autostarts
_autostart_puppet(log)
+ # Run the agent if needed
+ if run:
+ log.debug('Running puppet-agent')
+ cmd = [puppet_bin, 'agent']
+ if 'exec_args' in puppet_cfg:
+ cmd_args = puppet_cfg['exec_args']
+ if isinstance(cmd_args, (list, tuple)):
+ cmd.extend(cmd_args)
+ elif isinstance(cmd_args, str):
+ cmd.extend(cmd_args.split())
+ else:
+ log.warning("Unknown type %s provided for puppet"
+ " 'exec_args' expected list, tuple,"
+ " or string", type(cmd_args))
+ cmd.extend(PUPPET_AGENT_DEFAULT_ARGS)
+ else:
+ cmd.extend(PUPPET_AGENT_DEFAULT_ARGS)
+ subp.subp(cmd, capture=False)
+
# Start puppetd
subp.subp(['service', 'puppet', 'start'], capture=False)
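
A sketch of a 'puppet' cfg dict exercising the new AIO install path and the
post-install agent run (the server name is a placeholder):

    puppet_cfg = {
        'install': True,
        'install_type': 'aio',    # fetch and run the AIO install script
        'collection': 'puppet7',  # release stream; omit for the default
        'cleanup': True,          # purge the puppetlabs repo afterwards
        'exec': True,             # run 'puppet agent' after configuration
        'exec_args': ['--test', '--onetime'],
        'conf': {
            'agent': {
                'server': 'puppetserver.example.org',
                'certname': '%i.%f',
            },
        },
    }
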
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 9afbb847..990a6939 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -85,6 +85,10 @@ def _resize_zfs(mount_point, devpth):
return ('zpool', 'online', '-e', mount_point, devpth)
+def _resize_hammer2(mount_point, devpth):
+ return ('hammer2', 'growfs', mount_point)
+
+
def _can_skip_resize_ufs(mount_point, devpth):
# possible errors cases on the code-path to growfs -N following:
# https://github.com/freebsd/freebsd/blob/HEAD/sbin/growfs/growfs.c
@@ -113,6 +117,7 @@ RESIZE_FS_PREFIXES_CMDS = [
('xfs', _resize_xfs),
('ufs', _resize_ufs),
('zfs', _resize_zfs),
+ ('hammer2', _resize_hammer2),
]
RESIZE_FS_PRECHECK_CMDS = {
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 466dad03..648935e4 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -30,7 +30,7 @@ are configured correctly.
**Module frequency:** per instance
-**Supported distros:** alpine, fedora, rhel, sles
+**Supported distros:** alpine, fedora, photon, rhel, sles
**Config keys**::
@@ -47,18 +47,23 @@ are configured correctly.
"""
from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
from cloudinit import templater
+from cloudinit.settings import PER_INSTANCE
from cloudinit import util
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
-distros = ['alpine', 'fedora', 'opensuse', 'rhel', 'sles']
+distros = ['alpine', 'fedora', 'opensuse', 'photon', 'rhel', 'sles']
+RESOLVE_CONFIG_TEMPLATE_MAP = {
+ '/etc/resolv.conf': 'resolv.conf',
+ '/etc/systemd/resolved.conf': 'systemd.resolved.conf',
+}
-def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
+
+def generate_resolv_conf(template_fn, params, target_fname):
flags = []
false_flags = []
@@ -103,13 +108,20 @@ def handle(name, cfg, cloud, log, _args):
if "resolv_conf" not in cfg:
log.warning("manage_resolv_conf True but no parameters provided!")
+ return
- template_fn = cloud.get_template_filename('resolv.conf')
- if not template_fn:
- log.warning("No template found, not rendering /etc/resolv.conf")
+ try:
+ template_fn = cloud.get_template_filename(
+ RESOLVE_CONFIG_TEMPLATE_MAP[cloud.distro.resolve_conf_fn])
+ except KeyError:
+ log.warning("No template found, not rendering resolve configs")
return
- generate_resolv_conf(template_fn=template_fn, params=cfg["resolv_conf"])
+ generate_resolv_conf(
+ template_fn=template_fn,
+ params=cfg["resolv_conf"],
+ target_fname=cloud.distro.resolve_conf_fn
+ )
return
# vi: ts=4 expandtab
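
The rendered template now follows the distro's resolver config path. A sketch,
assuming a distro (such as Photon) whose resolve_conf_fn points at
systemd-resolved:

    resolve_conf_fn = '/etc/systemd/resolved.conf'
    template_name = RESOLVE_CONFIG_TEMPLATE_MAP[resolve_conf_fn]
    # -> 'systemd.resolved.conf'; cloud.get_template_filename() then locates
    # the matching .tmpl, and generate_resolv_conf() renders it to
    # resolve_conf_fn instead of a hard-coded /etc/resolv.conf.
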
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index db513ed7..b7a48dcc 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -18,7 +18,8 @@ entry, the config entry will be skipped.
**Module frequency:** per always
-**Supported distros:** almalinux, centos, fedora, rhel
+**Supported distros:** almalinux, centos, eurolinux, fedora, photon, rhel,
+ rocky, virtuozzo
**Config keys**::
@@ -36,7 +37,8 @@ from configparser import ConfigParser
from cloudinit import util
-distros = ['almalinux', 'centos', 'fedora', 'rhel']
+distros = ['almalinux', 'centos', 'eurolinux', 'fedora', 'photon', 'rhel',
+ 'rocky', 'virtuozzo']
def _canonicalize_id(repo_id):
diff --git a/cloudinit/config/tests/test_resolv_conf.py b/cloudinit/config/tests/test_resolv_conf.py
index 6546a0b5..45a06c22 100644
--- a/cloudinit/config/tests/test_resolv_conf.py
+++ b/cloudinit/config/tests/test_resolv_conf.py
@@ -1,9 +1,8 @@
-from unittest import mock
-
import pytest
+from unittest import mock
from cloudinit.config.cc_resolv_conf import generate_resolv_conf
-
+from tests.unittests.test_distros.test_create_users import MyBaseDistro
EXPECTED_HEADER = """\
# Your system has been configured with 'manage-resolv-conf' set to true.
@@ -14,22 +13,28 @@ EXPECTED_HEADER = """\
class TestGenerateResolvConf:
+
+ dist = MyBaseDistro()
+ tmpl_fn = "templates/resolv.conf.tmpl"
+
@mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
- def test_default_target_fname_is_etc_resolvconf(self, m_render_to_file):
- generate_resolv_conf("templates/resolv.conf.tmpl", mock.MagicMock())
+ def test_dist_resolv_conf_fn(self, m_render_to_file):
+ self.dist.resolve_conf_fn = "/tmp/resolv-test.conf"
+ generate_resolv_conf(self.tmpl_fn,
+ mock.MagicMock(),
+ self.dist.resolve_conf_fn)
assert [
- mock.call(mock.ANY, "/etc/resolv.conf", mock.ANY)
+ mock.call(mock.ANY, self.dist.resolve_conf_fn, mock.ANY)
] == m_render_to_file.call_args_list
@mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
def test_target_fname_is_used_if_passed(self, m_render_to_file):
- generate_resolv_conf(
- "templates/resolv.conf.tmpl", mock.MagicMock(), "/use/this/path"
- )
+ path = "/use/this/path"
+ generate_resolv_conf(self.tmpl_fn, mock.MagicMock(), path)
assert [
- mock.call(mock.ANY, "/use/this/path", mock.ANY)
+ mock.call(mock.ANY, path, mock.ANY)
] == m_render_to_file.call_args_list
# Patch in templater so we can assert on the actual generated content
@@ -75,7 +80,8 @@ class TestGenerateResolvConf:
def test_flags_and_options(
self, m_write_file, params, expected_extra_line
):
- generate_resolv_conf("templates/resolv.conf.tmpl", params)
+ target_fn = "/etc/resolv.conf"
+ generate_resolv_conf(self.tmpl_fn, params, target_fn)
expected_content = EXPECTED_HEADER
if expected_extra_line is not None:
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 107b928c..a634623a 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -16,13 +16,16 @@ import stat
import string
import urllib.parse
from io import StringIO
+from typing import Any, Mapping
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import net
+from cloudinit.net import activators
from cloudinit.net import eni
from cloudinit.net import network_state
from cloudinit.net import renderers
+from cloudinit.net.network_state import parse_net_config_data
from cloudinit import persistence
from cloudinit import ssh_util
from cloudinit import type_utils
@@ -46,7 +49,8 @@ OSFAMILIES = {
'debian': ['debian', 'ubuntu'],
'freebsd': ['freebsd'],
'gentoo': ['gentoo'],
- 'redhat': ['almalinux', 'amazon', 'centos', 'fedora', 'rhel'],
+ 'redhat': ['almalinux', 'amazon', 'centos', 'eurolinux', 'fedora',
+ 'photon', 'rhel', 'rocky', 'virtuozzo'],
'suse': ['opensuse', 'sles'],
}
@@ -71,7 +75,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
hostname_conf_fn = "/etc/hostname"
tz_zone_dir = "/usr/share/zoneinfo"
init_cmd = ['service'] # systemctl, service etc
- renderer_configs = {}
+ renderer_configs = {} # type: Mapping[str, Mapping[str, Any]]
_preferred_ntp_clients = None
networking_cls = LinuxNetworking
# This is used by self.shutdown_command(), and can be overridden in
@@ -80,6 +84,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
_ci_pkl_version = 1
prefer_fqdn = False
+ resolve_conf_fn = "/etc/resolv.conf"
def __init__(self, name, cfg, paths):
self._paths = paths
@@ -104,14 +109,12 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
raise NotImplementedError()
def _write_network(self, settings):
- raise RuntimeError(
+ """Deprecated. Remove if/when arch and gentoo support renderers."""
+ raise NotImplementedError(
"Legacy function '_write_network' was called in distro '%s'.\n"
"_write_network_config needs implementation.\n" % self.name)
- def _write_network_config(self, settings):
- raise NotImplementedError()
-
- def _supported_write_network_config(self, network_config):
+ def _write_network_state(self, network_state):
priority = util.get_cfg_by_path(
self._cfg, ('network', 'renderers'), None)
@@ -119,8 +122,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
LOG.debug("Selected renderer '%s' from priority list: %s",
name, priority)
renderer = render_cls(config=self.renderer_configs.get(name))
- renderer.render_network_config(network_config)
- return []
+ renderer.render_network_state(network_state)
def _find_tz_file(self, tz):
tz_file = os.path.join(self.tz_zone_dir, str(tz))
@@ -145,7 +147,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
return uses_systemd()
@abc.abstractmethod
- def package_command(self, cmd, args=None, pkgs=None):
+ def package_command(self, command, args=None, pkgs=None):
raise NotImplementedError()
@abc.abstractmethod
@@ -172,6 +174,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
mirror_info=arch_info)
def apply_network(self, settings, bring_up=True):
+ """Deprecated. Remove if/when arch and gentoo support renderers."""
# this applies network where 'settings' is interfaces(5) style
# it is obsolete compared to apply_network_config
# Write it out
@@ -186,6 +189,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
return False
def _apply_network_from_network_config(self, netconfig, bring_up=True):
+ """Deprecated. Remove if/when arch and gentoo support renderers."""
distro = self.__class__
LOG.warning("apply_network_config is not currently implemented "
"for distribution '%s'. Attempting to use apply_network",
@@ -202,12 +206,20 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
def generate_fallback_config(self):
return net.generate_fallback_config()
- def apply_network_config(self, netconfig, bring_up=False):
- # apply network config netconfig
+ def apply_network_config(self, netconfig, bring_up=False) -> bool:
+ """Apply the network config.
+
+        If bring_up is True, attempt to bring up the interfaces described
+        by the parsed network state.
+
+ Returns True if any devices failed to come up, otherwise False.
+ """
# This method is preferred to apply_network which only takes
# a much less complete network config format (interfaces(5)).
+ network_state = parse_net_config_data(netconfig)
try:
- dev_names = self._write_network_config(netconfig)
+ self._write_network_state(network_state)
except NotImplementedError:
# backwards compat until all distros have apply_network_config
return self._apply_network_from_network_config(
@@ -215,7 +227,8 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
# Now try to bring them up
if bring_up:
- return self._bring_up_interfaces(dev_names)
+ network_activator = activators.select_activator()
+ network_activator.bring_up_all_interfaces(network_state)
return False
def apply_network_config_names(self, netconfig):
@@ -391,20 +404,11 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
return self._preferred_ntp_clients
def _bring_up_interface(self, device_name):
- cmd = ['ifup', device_name]
- LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
- try:
- (_out, err) = subp.subp(cmd)
- if len(err):
- LOG.warning("Running %s resulted in stderr output: %s",
- cmd, err)
- return True
- except subp.ProcessExecutionError:
- util.logexc(LOG, "Running interface command %s failed", cmd)
- return False
+ """Deprecated. Remove if/when arch and gentoo support renderers."""
+ raise NotImplementedError
def _bring_up_interfaces(self, device_names):
+ """Deprecated. Remove if/when arch and gentoo support renderers."""
am_failed = 0
for d in device_names:
if not self._bring_up_interface(d):
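Editor's note: with _write_network_config gone, the flow is: parse the config into a NetworkState once, render it, then hand the same object to an activator. A hedged sketch of that sequence, assuming a cloud-init checkout is on the import path (select_activator raises RuntimeError when no backend is available):

    from cloudinit.net.activators import select_activator
    from cloudinit.net.network_state import parse_net_config_data

    netconfig = {
        'version': 1,
        'config': [{'type': 'physical', 'name': 'eth0',
                    'subnets': [{'type': 'dhcp'}]}],
    }

    # Parse once; renderers and activators now share one NetworkState.
    network_state = parse_net_config_data(netconfig)
    activator = select_activator()  # first available in DEFAULT_PRIORITY
    activator.bring_up_all_interfaces(network_state)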
diff --git a/cloudinit/distros/alpine.py b/cloudinit/distros/alpine.py
index ca5bfe80..73b68baf 100644
--- a/cloudinit/distros/alpine.py
+++ b/cloudinit/distros/alpine.py
@@ -73,31 +73,18 @@ class Distro(distros.Distro):
self.update_package_sources()
self.package_command('add', pkgs=pkglist)
- def _write_network_config(self, netconfig):
- return self._supported_write_network_config(netconfig)
-
- def _bring_up_interfaces(self, device_names):
- use_all = False
- for d in device_names:
- if d == 'all':
- use_all = True
- if use_all:
- return distros.Distro._bring_up_interface(self, '-a')
- else:
- return distros.Distro._bring_up_interfaces(self, device_names)
-
- def _write_hostname(self, your_hostname, out_fn):
+ def _write_hostname(self, hostname, filename):
conf = None
try:
# Try to update the previous one
# so lets see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
+ conf = self._read_hostname_conf(filename)
except IOError:
pass
if not conf:
conf = HostnameConf('')
- conf.set_hostname(your_hostname)
- util.write_file(out_fn, str(conf), 0o644)
+ conf.set_hostname(hostname)
+ util.write_file(filename, str(conf), 0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index f8385f7f..3c5bbb38 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -25,7 +25,6 @@ LOG = logging.getLogger(__name__)
class Distro(distros.Distro):
locale_gen_fn = "/etc/locale.gen"
network_conf_dir = "/etc/netctl"
- resolve_conf_fn = "/etc/resolv.conf"
init_cmd = ['systemctl'] # init scripts
renderer_configs = {
"netplan": {"netplan_path": "/etc/netplan/50-cloud-init.yaml",
@@ -62,9 +61,9 @@ class Distro(distros.Distro):
self.update_package_sources()
self.package_command('', pkgs=pkglist)
- def _write_network_config(self, netconfig):
+ def _write_network_state(self, network_state):
try:
- return self._supported_write_network_config(netconfig)
+ super()._write_network_state(network_state)
except RendererNotFoundError as e:
# Fall back to old _write_network
raise NotImplementedError from e
@@ -102,24 +101,18 @@ class Distro(distros.Distro):
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
- def _bring_up_interfaces(self, device_names):
- for d in device_names:
- if not self._bring_up_interface(d):
- return False
- return True
-
- def _write_hostname(self, your_hostname, out_fn):
+ def _write_hostname(self, hostname, filename):
conf = None
try:
# Try to update the previous one
# so lets see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
+ conf = self._read_hostname_conf(filename)
except IOError:
pass
if not conf:
conf = HostnameConf('')
- conf.set_hostname(your_hostname)
- util.write_file(out_fn, str(conf), omode="w", mode=0o644)
+ conf.set_hostname(hostname)
+ util.write_file(filename, str(conf), omode="w", mode=0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py
index f717a667..c2fc1e0b 100644
--- a/cloudinit/distros/bsd.py
+++ b/cloudinit/distros/bsd.py
@@ -120,9 +120,6 @@ class BSD(distros.Distro):
# Allow the output of this to flow outwards (ie not be captured)
subp.subp(cmd, env=self._get_pkg_cmd_environ(), capture=False)
- def _write_network_config(self, netconfig):
- return self._supported_write_network_config(netconfig)
-
def set_timezone(self, tz):
distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 844aaf21..f2b4dfc9 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -111,32 +111,22 @@ class Distro(distros.Distro):
self.update_package_sources()
self.package_command('install', pkgs=pkglist)
- def _write_network_config(self, netconfig):
+ def _write_network_state(self, network_state):
_maybe_remove_legacy_eth0()
- return self._supported_write_network_config(netconfig)
-
- def _bring_up_interfaces(self, device_names):
- use_all = False
- for d in device_names:
- if d == 'all':
- use_all = True
- if use_all:
- return distros.Distro._bring_up_interface(self, '--all')
- else:
- return distros.Distro._bring_up_interfaces(self, device_names)
+ return super()._write_network_state(network_state)
- def _write_hostname(self, your_hostname, out_fn):
+ def _write_hostname(self, hostname, filename):
conf = None
try:
# Try to update the previous one
# so lets see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
+ conf = self._read_hostname_conf(filename)
except IOError:
pass
if not conf:
conf = HostnameConf('')
- conf.set_hostname(your_hostname)
- util.write_file(out_fn, str(conf), 0o644)
+ conf.set_hostname(hostname)
+ util.write_file(filename, str(conf), 0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
diff --git a/cloudinit/distros/dragonflybsd.py b/cloudinit/distros/dragonflybsd.py
new file mode 100644
index 00000000..2d825518
--- /dev/null
+++ b/cloudinit/distros/dragonflybsd.py
@@ -0,0 +1,12 @@
+# Copyright (C) 2020-2021 Gonéri Le Bouder
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import cloudinit.distros.freebsd
+
+
+class Distro(cloudinit.distros.freebsd.Distro):
+ home_dir = '/home'
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/eurolinux.py b/cloudinit/distros/eurolinux.py
new file mode 100644
index 00000000..edb3165d
--- /dev/null
+++ b/cloudinit/distros/eurolinux.py
@@ -0,0 +1,9 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import rhel
+
+
+class Distro(rhel.Distro):
+ pass
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 9659843f..d94a52b8 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -18,6 +18,12 @@ LOG = logging.getLogger(__name__)
class Distro(cloudinit.distros.bsd.BSD):
+ """
+ Distro subclass for FreeBSD.
+
+ (N.B. DragonFlyBSD inherits from this class.)
+ """
+
usr_lib_exec = '/usr/local/lib'
login_conf_fn = '/etc/login.conf'
login_conf_fn_bak = '/etc/login.conf.orig'
@@ -28,6 +34,7 @@ class Distro(cloudinit.distros.bsd.BSD):
pkg_cmd_update_prefix = ["pkg", "update"]
pkg_cmd_upgrade_prefix = ["pkg", "upgrade"]
prefer_fqdn = True # See rc.conf(5) in FreeBSD
+ home_dir = '/usr/home'
def _get_add_member_to_group_cmd(self, member_name, group_name):
return ['pw', 'usermod', '-n', member_name, '-G', group_name]
@@ -66,9 +73,12 @@ class Distro(cloudinit.distros.bsd.BSD):
pw_useradd_cmd.append('-d/nonexistent')
log_pw_useradd_cmd.append('-d/nonexistent')
else:
- pw_useradd_cmd.append('-d/usr/home/%s' % name)
+ pw_useradd_cmd.append('-d{home_dir}/{name}'.format(
+ home_dir=self.home_dir, name=name))
pw_useradd_cmd.append('-m')
- log_pw_useradd_cmd.append('-d/usr/home/%s' % name)
+ log_pw_useradd_cmd.append('-d{home_dir}/{name}'.format(
+ home_dir=self.home_dir, name=name))
+
log_pw_useradd_cmd.append('-m')
# Run the command
@@ -155,4 +165,5 @@ class Distro(cloudinit.distros.bsd.BSD):
"update-sources", self.package_command,
["update"], freq=PER_INSTANCE)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index e9b82602..1be76dc8 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -23,7 +23,6 @@ LOG = logging.getLogger(__name__)
class Distro(distros.Distro):
locale_conf_fn = '/etc/locale.gen'
network_conf_fn = '/etc/conf.d/net'
- resolve_conf_fn = '/etc/resolv.conf'
hostname_conf_fn = '/etc/conf.d/hostname'
init_cmd = ['rc-service'] # init scripts
@@ -150,12 +149,12 @@ class Distro(distros.Distro):
else:
return distros.Distro._bring_up_interfaces(self, device_names)
- def _write_hostname(self, your_hostname, out_fn):
+ def _write_hostname(self, hostname, filename):
conf = None
try:
# Try to update the previous one
# so lets see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
+ conf = self._read_hostname_conf(filename)
except IOError:
pass
if not conf:
@@ -164,8 +163,8 @@ class Distro(distros.Distro):
# Many distro's format is the hostname by itself, and that is the
# way HostnameConf works but gentoo expects it to be in
# hostname="the-actual-hostname"
- conf.set_hostname('hostname="%s"' % your_hostname)
- util.write_file(out_fn, str(conf), 0o644)
+ conf.set_hostname('hostname="%s"' % hostname)
+ util.write_file(filename, str(conf), 0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index 7ca0ef99..2a7497cc 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -27,7 +27,6 @@ class Distro(distros.Distro):
locale_conf_fn = '/etc/sysconfig/language'
network_conf_fn = '/etc/sysconfig/network/config'
network_script_tpl = '/etc/sysconfig/network/ifcfg-%s'
- resolve_conf_fn = '/etc/resolv.conf'
route_conf_tpl = '/etc/sysconfig/network/ifroute-%s'
systemd_hostname_conf_fn = '/etc/hostname'
systemd_locale_conf_fn = '/etc/locale.conf'
@@ -117,12 +116,6 @@ class Distro(distros.Distro):
self._runner.run("update-sources", self.package_command,
['refresh'], freq=PER_INSTANCE)
- def _bring_up_interfaces(self, device_names):
- if device_names and 'all' in device_names:
- raise RuntimeError(('Distro %s can not translate '
- 'the device name "all"') % (self.name))
- return distros.Distro._bring_up_interfaces(self, device_names)
-
def _read_hostname(self, filename, default=None):
if self.uses_systemd() and filename.endswith('/previous-hostname'):
return util.load_file(filename).strip()
@@ -157,9 +150,9 @@ class Distro(distros.Distro):
host_fn = self.hostname_conf_fn
return (host_fn, self._read_hostname(host_fn))
- def _write_hostname(self, hostname, out_fn):
- if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
- util.write_file(out_fn, hostname)
+ def _write_hostname(self, hostname, filename):
+ if self.uses_systemd() and filename.endswith('/previous-hostname'):
+ util.write_file(filename, hostname)
elif self.uses_systemd():
subp.subp(['hostnamectl', 'set-hostname', str(hostname)])
else:
@@ -167,16 +160,13 @@ class Distro(distros.Distro):
try:
# Try to update the previous one
# so lets see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
+ conf = self._read_hostname_conf(filename)
except IOError:
pass
if not conf:
conf = HostnameConf('')
conf.set_hostname(hostname)
- util.write_file(out_fn, str(conf), 0o644)
-
- def _write_network_config(self, netconfig):
- return self._supported_write_network_config(netconfig)
+ util.write_file(filename, str(conf), 0o644)
@property
def preferred_ntp_clients(self):
diff --git a/cloudinit/distros/photon.py b/cloudinit/distros/photon.py
new file mode 100644
index 00000000..4ff90ea6
--- /dev/null
+++ b/cloudinit/distros/photon.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python3
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2021 VMware Inc.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import net
+from cloudinit import util
+from cloudinit import subp
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit.settings import PER_INSTANCE
+from cloudinit.distros import rhel_util as rhutil
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(distros.Distro):
+ systemd_hostname_conf_fn = '/etc/hostname'
+ network_conf_dir = '/etc/systemd/network/'
+ systemd_locale_conf_fn = '/etc/locale.conf'
+ resolve_conf_fn = '/etc/systemd/resolved.conf'
+
+ renderer_configs = {
+ 'networkd': {
+ 'resolv_conf_fn': resolve_conf_fn,
+ 'network_conf_dir': network_conf_dir,
+ }
+ }
+
+ # Should be fqdn if we can use it
+ prefer_fqdn = True
+
+ def __init__(self, name, cfg, paths):
+ distros.Distro.__init__(self, name, cfg, paths)
+ # This will be used to restrict certain
+ # calls from repeatly happening (when they
+ # should only happen say once per instance...)
+ self._runner = helpers.Runners(paths)
+ self.osfamily = 'photon'
+ self.init_cmd = ['systemctl']
+
+ def exec_cmd(self, cmd, capture=True):
+ LOG.debug('Attempting to run: %s', cmd)
+ try:
+ (out, err) = subp.subp(cmd, capture=capture)
+ if err:
+ LOG.warning('Running %s resulted in stderr output: %s',
+ cmd, err)
+                return True, out, err
+ return False, out, err
+ except subp.ProcessExecutionError:
+ util.logexc(LOG, 'Command %s failed', cmd)
+ return True, None, None
+
+ def generate_fallback_config(self):
+ key = 'disable_fallback_netcfg'
+ disable_fallback_netcfg = self._cfg.get(key, True)
+ LOG.debug('%s value is: %s', key, disable_fallback_netcfg)
+
+ if not disable_fallback_netcfg:
+ return net.generate_fallback_config()
+
+ LOG.info(
+            'Skipping generate_fallback_config. Relying on PhotonOS '
+            'default network config'
+ )
+ return None
+
+ def apply_locale(self, locale, out_fn=None):
+        # This has a dependency on glibc-i18n; users need to install it
+        # manually and enable the option in cloud.cfg
+ if not out_fn:
+ out_fn = self.systemd_locale_conf_fn
+
+ locale_cfg = {
+ 'LANG': locale,
+ }
+
+ rhutil.update_sysconfig_file(out_fn, locale_cfg)
+
+ # rhutil will modify /etc/locale.conf
+        # For the locale change to take effect, a reboot is needed, or we
+        # can restart systemd-localed. This is the equivalent of localectl.
+ cmd = ['systemctl', 'restart', 'systemd-localed']
+ self.exec_cmd(cmd)
+
+ def install_packages(self, pkglist):
+ # self.update_package_sources()
+ self.package_command('install', pkgs=pkglist)
+
+ def _write_hostname(self, hostname, filename):
+ if filename and filename.endswith('/previous-hostname'):
+ util.write_file(filename, hostname)
+ else:
+ ret, _out, err = self.exec_cmd(['hostnamectl', 'set-hostname',
+ str(hostname)])
+ if ret:
+                LOG.warning('Error while setting hostname: %s\n'
+                            'Given hostname: %s', err, hostname)
+
+ def _read_system_hostname(self):
+ sys_hostname = self._read_hostname(self.systemd_hostname_conf_fn)
+ return (self.systemd_hostname_conf_fn, sys_hostname)
+
+ def _read_hostname(self, filename, default=None):
+ if filename and filename.endswith('/previous-hostname'):
+ return util.load_file(filename).strip()
+
+ _ret, out, _err = self.exec_cmd(['hostname', '-f'])
+ return out.strip() if out else default
+
+ def _get_localhost_ip(self):
+ return '127.0.1.1'
+
+ def set_timezone(self, tz):
+ distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
+
+ def package_command(self, command, args=None, pkgs=None):
+ if not pkgs:
+ pkgs = []
+
+ cmd = ['tdnf', '-y']
+ if args and isinstance(args, str):
+ cmd.append(args)
+ elif args and isinstance(args, list):
+ cmd.extend(args)
+
+ cmd.append(command)
+
+ pkglist = util.expand_package_list('%s-%s', pkgs)
+ cmd.extend(pkglist)
+
+ ret, _out, err = self.exec_cmd(cmd)
+ if ret:
+ LOG.error('Error while installing packages: %s', err)
+
+ def update_package_sources(self):
+ self._runner.run('update-sources', self.package_command,
+ ['makecache'], freq=PER_INSTANCE)
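Editor's note: Distro.package_command above reduces to straightforward list assembly around tdnf. A standalone sketch (build_tdnf_cmd is an illustrative helper; the real method expands versioned packages via util.expand_package_list and runs the result through exec_cmd):

    def build_tdnf_cmd(command, args=None, pkgs=None):
        cmd = ['tdnf', '-y']
        if isinstance(args, str):
            cmd.append(args)
        elif isinstance(args, list):
            cmd.extend(args)
        cmd.append(command)
        cmd.extend(pkgs or [])
        return cmd

    assert build_tdnf_cmd('install', pkgs=['vim']) == \
        ['tdnf', '-y', 'install', 'vim']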
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 0c00a531..c9ee2747 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -36,7 +36,6 @@ class Distro(distros.Distro):
hostname_conf_fn = "/etc/sysconfig/network"
systemd_hostname_conf_fn = "/etc/hostname"
network_script_tpl = '/etc/sysconfig/network-scripts/ifcfg-%s'
- resolve_conf_fn = "/etc/resolv.conf"
tz_local_fn = "/etc/localtime"
usr_lib_exec = "/usr/libexec"
renderer_configs = {
@@ -66,9 +65,6 @@ class Distro(distros.Distro):
def install_packages(self, pkglist):
self.package_command('install', pkgs=pkglist)
- def _write_network_config(self, netconfig):
- return self._supported_write_network_config(netconfig)
-
def apply_locale(self, locale, out_fn=None):
if self.uses_systemd():
if not out_fn:
@@ -82,18 +78,18 @@ class Distro(distros.Distro):
}
rhel_util.update_sysconfig_file(out_fn, locale_cfg)
- def _write_hostname(self, hostname, out_fn):
+ def _write_hostname(self, hostname, filename):
# systemd will never update previous-hostname for us, so
# we need to do it ourselves
- if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
- util.write_file(out_fn, hostname)
+ if self.uses_systemd() and filename.endswith('/previous-hostname'):
+ util.write_file(filename, hostname)
elif self.uses_systemd():
subp.subp(['hostnamectl', 'set-hostname', str(hostname)])
else:
host_cfg = {
'HOSTNAME': hostname,
}
- rhel_util.update_sysconfig_file(out_fn, host_cfg)
+ rhel_util.update_sysconfig_file(filename, host_cfg)
def _read_system_hostname(self):
if self.uses_systemd():
@@ -118,12 +114,6 @@ class Distro(distros.Distro):
else:
return default
- def _bring_up_interfaces(self, device_names):
- if device_names and 'all' in device_names:
- raise RuntimeError(('Distro %s can not translate '
- 'the device name "all"') % (self.name))
- return distros.Distro._bring_up_interfaces(self, device_names)
-
def set_timezone(self, tz):
tz_file = self._find_tz_file(tz)
if self.uses_systemd():
diff --git a/cloudinit/distros/rocky.py b/cloudinit/distros/rocky.py
new file mode 100644
index 00000000..edb3165d
--- /dev/null
+++ b/cloudinit/distros/rocky.py
@@ -0,0 +1,9 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import rhel
+
+
+class Distro(rhel.Distro):
+ pass
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/virtuozzo.py b/cloudinit/distros/virtuozzo.py
new file mode 100644
index 00000000..edb3165d
--- /dev/null
+++ b/cloudinit/distros/virtuozzo.py
@@ -0,0 +1,9 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import rhel
+
+
+class Distro(rhel.Distro):
+ pass
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/event.py b/cloudinit/event.py
index f7b311fb..53ad4c25 100644
--- a/cloudinit/event.py
+++ b/cloudinit/event.py
@@ -1,17 +1,73 @@
# This file is part of cloud-init. See LICENSE file for license information.
-
"""Classes and functions related to event handling."""
+from enum import Enum
+from typing import Dict, Set
+
+from cloudinit import log as logging
+
+LOG = logging.getLogger(__name__)
+
-# Event types which can generate maintenance requests for cloud-init.
-class EventType(object):
- BOOT = "System boot"
- BOOT_NEW_INSTANCE = "New instance first boot"
+class EventScope(Enum):
+ # NETWORK is currently the only scope, but we want to leave room to
+ # grow other scopes (e.g., STORAGE) without having to make breaking
+ # changes to the user config
+ NETWORK = 'network'
- # TODO: Cloud-init will grow support for the follow event types:
- # UDEV
+ def __str__(self): # pylint: disable=invalid-str-returned
+ return self.value
+
+
+class EventType(Enum):
+ """Event types which can generate maintenance requests for cloud-init."""
+    # Cloud-init should grow support for the following event types:
+ # HOTPLUG
# METADATA_CHANGE
# USER_REQUEST
+ BOOT = "boot"
+ BOOT_NEW_INSTANCE = "boot-new-instance"
+ BOOT_LEGACY = "boot-legacy"
+ HOTPLUG = 'hotplug'
+
+ def __str__(self): # pylint: disable=invalid-str-returned
+ return self.value
+
+
+def userdata_to_events(user_config: dict) -> Dict[EventScope, Set[EventType]]:
+ """Convert userdata into update config format defined on datasource.
+
+    Userdata is in the form of (e.g.):
+    {'network': {'when': ['boot']}}
+
+    DataSource config is in the form of:
+    {EventScope.NETWORK: {EventType.BOOT}}
+
+ Take the first and return the second
+ """
+ update_config = {}
+ for scope, scope_list in user_config.items():
+ try:
+ new_scope = EventScope(scope)
+ except ValueError as e:
+ LOG.warning(
+ "%s! Update data will be ignored for '%s' scope",
+ str(e),
+ scope,
+ )
+ continue
+ try:
+ new_values = [EventType(x) for x in scope_list['when']]
+ except ValueError as e:
+ LOG.warning(
+ "%s! Update data will be ignored for '%s' scope",
+ str(e),
+ scope,
+ )
+ new_values = []
+ update_config[new_scope] = set(new_values)
+
+ return update_config
# vi: ts=4 expandtab
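Editor's note: the docstring's round trip is easy to check interactively, assuming cloud-init is importable; note that an unknown scope is logged and dropped rather than raised:

    from cloudinit.event import EventScope, EventType, userdata_to_events

    assert userdata_to_events({'network': {'when': ['boot']}}) == {
        EventScope.NETWORK: {EventType.BOOT}
    }
    # Unknown scopes are warned about and skipped.
    assert userdata_to_events({'storage': {'when': ['boot']}}) == {}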
diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py
index aadfbf86..5033abbb 100644
--- a/cloudinit/handlers/jinja_template.py
+++ b/cloudinit/handlers/jinja_template.py
@@ -12,7 +12,7 @@ except ImportError:
from cloudinit import handlers
from cloudinit import log as logging
-from cloudinit.sources import INSTANCE_JSON_FILE
+from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
from cloudinit.templater import render_string, MISSING_JINJA_PREFIX
from cloudinit.util import b64d, load_file, load_json, json_dumps
@@ -36,7 +36,8 @@ class JinjaTemplatePartHandler(handlers.Handler):
def handle_part(self, data, ctype, filename, payload, frequency, headers):
if ctype in handlers.CONTENT_SIGNALS:
return
- jinja_json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
+ jinja_json_file = os.path.join(
+ self.paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE)
rendered_payload = render_jinja_payload_from_file(
payload, filename, jinja_json_file)
if not rendered_payload:
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 2e5df042..10149907 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -8,7 +8,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import collections
+import collections.abc
import io
import logging
import logging.config
@@ -78,7 +78,7 @@ def setupLogging(cfg=None):
for a_cfg in cfg['log_cfgs']:
if isinstance(a_cfg, str):
log_cfgs.append(a_cfg)
- elif isinstance(a_cfg, (collections.Iterable)):
+ elif isinstance(a_cfg, (collections.abc.Iterable)):
cfg_str = [str(c) for c in a_cfg]
log_cfgs.append('\n'.join(cfg_str))
else:
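Editor's note: context for the one-liner above. The bare collections.Iterable alias was deprecated in Python 3.3 and removed in 3.10, so the isinstance check has to go through collections.abc:

    import collections.abc

    assert isinstance(['cfg line 1', 'cfg line 2'], collections.abc.Iterable)
    assert not isinstance(42, collections.abc.Iterable)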
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 6b3b84f7..017c50c5 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -313,11 +313,11 @@ def is_netfail_standby(devname, driver=None):
def is_renamed(devname):
"""
/* interface name assignment types (sysfs name_assign_type attribute) */
- #define NET_NAME_UNKNOWN 0 /* unknown origin (not exposed to user) */
- #define NET_NAME_ENUM 1 /* enumerated by kernel */
- #define NET_NAME_PREDICTABLE 2 /* predictably named by the kernel */
- #define NET_NAME_USER 3 /* provided by user-space */
- #define NET_NAME_RENAMED 4 /* renamed by user-space */
+ #define NET_NAME_UNKNOWN 0 /* unknown origin (not exposed to user) */
+ #define NET_NAME_ENUM 1 /* enumerated by kernel */
+ #define NET_NAME_PREDICTABLE 2 /* predictably named by the kernel */
+ #define NET_NAME_USER 3 /* provided by user-space */
+ #define NET_NAME_RENAMED 4 /* renamed by user-space */
"""
name_assign_type = read_sys_net_safe(devname, 'name_assign_type')
if name_assign_type and name_assign_type in ['3', '4']:
@@ -351,7 +351,7 @@ def device_devid(devname):
def get_devicelist():
- if util.is_FreeBSD():
+ if util.is_FreeBSD() or util.is_DragonFlyBSD():
return list(get_interfaces_by_mac().values())
try:
@@ -376,7 +376,7 @@ def is_disabled_cfg(cfg):
def find_fallback_nic(blacklist_drivers=None):
"""Return the name of the 'fallback' network device."""
- if util.is_FreeBSD():
+ if util.is_FreeBSD() or util.is_DragonFlyBSD():
return find_fallback_nic_on_freebsd(blacklist_drivers)
elif util.is_NetBSD() or util.is_OpenBSD():
return find_fallback_nic_on_netbsd_or_openbsd(blacklist_drivers)
@@ -661,6 +661,8 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
cur['name'] = name
cur_info[name] = cur
+ LOG.debug("Detected interfaces %s", cur_info)
+
def update_byname(bymac):
return dict((data['name'], data)
for data in cur_info.values())
@@ -816,7 +818,7 @@ def get_ib_interface_hwaddr(ifname, ethernet_format):
def get_interfaces_by_mac(blacklist_drivers=None) -> dict:
- if util.is_FreeBSD():
+ if util.is_FreeBSD() or util.is_DragonFlyBSD():
return get_interfaces_by_mac_on_freebsd(
blacklist_drivers=blacklist_drivers)
elif util.is_NetBSD():
diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py
new file mode 100644
index 00000000..11149548
--- /dev/null
+++ b/cloudinit/net/activators.py
@@ -0,0 +1,279 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import os
+from abc import ABC, abstractmethod
+from typing import Iterable, List, Type
+
+from cloudinit import subp
+from cloudinit import util
+from cloudinit.net.eni import available as eni_available
+from cloudinit.net.netplan import available as netplan_available
+from cloudinit.net.networkd import available as networkd_available
+from cloudinit.net.network_state import NetworkState
+from cloudinit.net.sysconfig import NM_CFG_FILE
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _alter_interface(cmd, device_name) -> bool:
+ LOG.debug("Attempting command %s for device %s", cmd, device_name)
+ try:
+ (_out, err) = subp.subp(cmd)
+ if len(err):
+ LOG.warning("Running %s resulted in stderr output: %s",
+ cmd, err)
+ return True
+ except subp.ProcessExecutionError:
+ util.logexc(LOG, "Running interface command %s failed", cmd)
+ return False
+
+
+class NetworkActivator(ABC):
+ @staticmethod
+ @abstractmethod
+ def available() -> bool:
+ """Return True if activator is available, otherwise return False."""
+ raise NotImplementedError()
+
+ @staticmethod
+ @abstractmethod
+ def bring_up_interface(device_name: str) -> bool:
+ """Bring up interface.
+
+        Return True if successful, otherwise return False.
+ """
+ raise NotImplementedError()
+
+ @staticmethod
+ @abstractmethod
+ def bring_down_interface(device_name: str) -> bool:
+ """Bring down interface.
+
+        Return True if successful, otherwise return False.
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def bring_up_interfaces(cls, device_names: Iterable[str]) -> bool:
+ """Bring up specified list of interfaces.
+
+        Return True if successful, otherwise return False.
+ """
+ return all(cls.bring_up_interface(device) for device in device_names)
+
+ @classmethod
+ def bring_up_all_interfaces(cls, network_state: NetworkState) -> bool:
+ """Bring up all interfaces.
+
+        Return True if successful, otherwise return False.
+ """
+ return cls.bring_up_interfaces(
+ [i['name'] for i in network_state.iter_interfaces()]
+ )
+
+ @classmethod
+ def bring_down_interfaces(cls, device_names: Iterable[str]) -> bool:
+ """Bring down specified list of interfaces.
+
+        Return True if successful, otherwise return False.
+ """
+ return all(cls.bring_down_interface(device) for device in device_names)
+
+ @classmethod
+ def bring_down_all_interfaces(cls, network_state: NetworkState) -> bool:
+ """Bring down all interfaces.
+
+        Return True if successful, otherwise return False.
+ """
+ return cls.bring_down_interfaces(
+ [i['name'] for i in network_state.iter_interfaces()]
+ )
+
+
+class IfUpDownActivator(NetworkActivator):
+ # Note that we're not overriding bring_up_interfaces to pass something
+ # like ifup --all because it isn't supported everywhere.
+    # E.g., NetworkManager has an ifupdown plugin that requires the name
+ # of a specific connection.
+ @staticmethod
+ def available(target=None) -> bool:
+ """Return true if ifupdown can be used on this system."""
+ return eni_available(target=target)
+
+ @staticmethod
+ def bring_up_interface(device_name: str) -> bool:
+ """Bring up interface using ifup.
+
+        Return True if successful, otherwise return False.
+ """
+ cmd = ['ifup', device_name]
+ return _alter_interface(cmd, device_name)
+
+ @staticmethod
+ def bring_down_interface(device_name: str) -> bool:
+ """Bring up interface using ifup.
+
+ Return True is successful, otherwise return False
+ """
+ cmd = ['ifdown', device_name]
+ return _alter_interface(cmd, device_name)
+
+
+class NetworkManagerActivator(NetworkActivator):
+ @staticmethod
+ def available(target=None) -> bool:
+ """ Return true if network manager can be used on this system."""
+ config_present = os.path.isfile(
+ subp.target_path(target, path=NM_CFG_FILE)
+ )
+ nmcli_present = subp.which('nmcli', target=target)
+ return config_present and bool(nmcli_present)
+
+ @staticmethod
+ def bring_up_interface(device_name: str) -> bool:
+ """Bring up interface using nmcli.
+
+        Return True if successful, otherwise return False.
+ """
+ cmd = ['nmcli', 'connection', 'up', 'ifname', device_name]
+ return _alter_interface(cmd, device_name)
+
+ @staticmethod
+ def bring_down_interface(device_name: str) -> bool:
+ """Bring down interface using nmcli.
+
+        Return True if successful, otherwise return False.
+ """
+ cmd = ['nmcli', 'connection', 'down', device_name]
+ return _alter_interface(cmd, device_name)
+
+
+class NetplanActivator(NetworkActivator):
+ NETPLAN_CMD = ['netplan', 'apply']
+
+ @staticmethod
+ def available(target=None) -> bool:
+ """ Return true if netplan can be used on this system."""
+ return netplan_available(target=target)
+
+ @staticmethod
+ def bring_up_interface(device_name: str) -> bool:
+ """Apply netplan config.
+
+        Return True if successful, otherwise return False.
+ """
+ LOG.debug("Calling 'netplan apply' rather than "
+ "altering individual interfaces")
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+
+ @staticmethod
+ def bring_up_interfaces(device_names: Iterable[str]) -> bool:
+ """Apply netplan config.
+
+        Return True if successful, otherwise return False.
+ """
+ LOG.debug("Calling 'netplan apply' rather than "
+ "altering individual interfaces")
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+
+ @staticmethod
+ def bring_up_all_interfaces(network_state: NetworkState) -> bool:
+ """Apply netplan config.
+
+        Return True if successful, otherwise return False.
+ """
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+
+ @staticmethod
+ def bring_down_interface(device_name: str) -> bool:
+ """Apply netplan config.
+
+        Return True if successful, otherwise return False.
+ """
+ LOG.debug("Calling 'netplan apply' rather than "
+ "altering individual interfaces")
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+
+ @staticmethod
+ def bring_down_interfaces(device_names: Iterable[str]) -> bool:
+ """Apply netplan config.
+
+        Return True if successful, otherwise return False.
+ """
+ LOG.debug("Calling 'netplan apply' rather than "
+ "altering individual interfaces")
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+
+ @staticmethod
+ def bring_down_all_interfaces(network_state: NetworkState) -> bool:
+ """Apply netplan config.
+
+        Return True if successful, otherwise return False.
+ """
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+
+
+class NetworkdActivator(NetworkActivator):
+ @staticmethod
+ def available(target=None) -> bool:
+ """Return true if ifupdown can be used on this system."""
+ return networkd_available(target=target)
+
+ @staticmethod
+ def bring_up_interface(device_name: str) -> bool:
+ """ Return True is successful, otherwise return False """
+ cmd = ['ip', 'link', 'set', 'up', device_name]
+ return _alter_interface(cmd, device_name)
+
+ @staticmethod
+ def bring_up_all_interfaces(network_state: NetworkState) -> bool:
+ """ Return True is successful, otherwise return False """
+ cmd = ['systemctl', 'restart', 'systemd-networkd', 'systemd-resolved']
+ return _alter_interface(cmd, 'all')
+
+ @staticmethod
+ def bring_down_interface(device_name: str) -> bool:
+ """ Return True is successful, otherwise return False """
+ cmd = ['ip', 'link', 'set', 'down', device_name]
+ return _alter_interface(cmd, device_name)
+
+
+# This section is mostly copied and pasted from renderers.py. An abstract
+# version to encompass both seems overkill at this point
+DEFAULT_PRIORITY = [
+ IfUpDownActivator,
+ NetworkManagerActivator,
+ NetplanActivator,
+ NetworkdActivator,
+]
+
+
+def search_activator(
+ priority=None, target=None
+) -> List[Type[NetworkActivator]]:
+ if priority is None:
+ priority = DEFAULT_PRIORITY
+
+ unknown = [i for i in priority if i not in DEFAULT_PRIORITY]
+ if unknown:
+ raise ValueError(
+ "Unknown activators provided in priority list: %s" % unknown)
+
+ return [activator for activator in priority if activator.available(target)]
+
+
+def select_activator(priority=None, target=None) -> Type[NetworkActivator]:
+ found = search_activator(priority, target)
+ if not found:
+ if priority is None:
+ priority = DEFAULT_PRIORITY
+ tmsg = ""
+ if target and target != "/":
+ tmsg = " in target=%s" % target
+ raise RuntimeError(
+ "No available network activators found%s. Searched "
+ "through list: %s" % (tmsg, priority))
+ selected = found[0]
+ LOG.debug('Using selected activator: %s', selected)
+ return selected
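Editor's note: callers can also pin the priority list, e.g. to prefer networkd. A usage sketch (the class names are the ones defined above; anything outside DEFAULT_PRIORITY raises ValueError, and an empty result raises RuntimeError):

    from cloudinit.net import activators

    try:
        activator_cls = activators.select_activator(
            priority=[activators.NetworkdActivator,
                      activators.IfUpDownActivator])
        activator_cls.bring_up_interface('eth0')
    except RuntimeError:
        # No listed activator is available on this system.
        pass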
diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py
index e34e0454..916cea32 100644
--- a/cloudinit/net/bsd.py
+++ b/cloudinit/net/bsd.py
@@ -33,7 +33,7 @@ class BSDRenderer(renderer.Renderer):
self.interface_configurations = {}
self._postcmds = config.get('postcmds', True)
- def _ifconfig_entries(self, settings, target=None):
+ def _ifconfig_entries(self, settings):
ifname_by_mac = net.get_interfaces_by_mac()
for interface in settings.iter_interfaces():
device_name = interface.get("name")
@@ -76,9 +76,10 @@ class BSDRenderer(renderer.Renderer):
self.interface_configurations[device_name] = {
'address': subnet.get('address'),
'netmask': subnet.get('netmask'),
+ 'mtu': subnet.get('mtu') or interface.get('mtu'),
}
- def _route_entries(self, settings, target=None):
+ def _route_entries(self, settings):
routes = list(settings.iter_routes())
for interface in settings.iter_interfaces():
subnets = interface.get("subnets", [])
@@ -101,7 +102,7 @@ class BSDRenderer(renderer.Renderer):
gateway = route.get('gateway')
self.set_route(network, netmask, gateway)
- def _resolve_conf(self, settings, target=None):
+ def _resolve_conf(self, settings):
nameservers = settings.dns_nameservers
searchdomains = settings.dns_searchdomains
for interface in settings.iter_interfaces():
@@ -114,11 +115,11 @@ class BSDRenderer(renderer.Renderer):
# fails.
try:
resolvconf = ResolvConf(util.load_file(subp.target_path(
- target, self.resolv_conf_fn)))
+ self.target, self.resolv_conf_fn)))
resolvconf.parse()
except IOError:
util.logexc(LOG, "Failed to parse %s, use new empty file",
- subp.target_path(target, self.resolv_conf_fn))
+ subp.target_path(self.target, self.resolv_conf_fn))
resolvconf = ResolvConf('')
resolvconf.parse()
@@ -136,10 +137,12 @@ class BSDRenderer(renderer.Renderer):
except ValueError:
util.logexc(LOG, "Failed to add search domain %s", domain)
util.write_file(
- subp.target_path(target, self.resolv_conf_fn),
+ subp.target_path(self.target, self.resolv_conf_fn),
str(resolvconf), 0o644)
def render_network_state(self, network_state, templates=None, target=None):
+ if target:
+ self.target = target
self._ifconfig_entries(settings=network_state)
self._route_entries(settings=network_state)
self._resolve_conf(settings=network_state)
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 4394c68b..9b94c9a0 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -173,7 +173,7 @@ def parse_dhcp_lease_file(lease_file):
@raises: InvalidDHCPLeaseFileError on empty of unparseable leasefile
content.
"""
- lease_regex = re.compile(r"lease {(?P<lease>[^}]*)}\n")
+ lease_regex = re.compile(r"lease {(?P<lease>.*?)}\n", re.DOTALL)
dhcp_leases = []
lease_content = util.load_file(lease_file)
if len(lease_content) == 0:
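Editor's note: one plausible reading of the regex change is that `[^}]*` can never step over a `}` inside a lease body, while the lazy `.*?` with re.DOTALL scans across lines until a closing `}` at a line boundary. A self-contained check:

    import re

    old = re.compile(r"lease {(?P<lease>[^}]*)}\n")
    new = re.compile(r"lease {(?P<lease>.*?)}\n", re.DOTALL)
    content = 'lease {\n  option domain-name "a}b";\n}\n'

    assert old.search(content) is None          # stuck on the inner '}'
    assert 'a}b' in new.search(content).group('lease')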
diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py
index 0285dfec..f8faf240 100644
--- a/cloudinit/net/freebsd.py
+++ b/cloudinit/net/freebsd.py
@@ -19,18 +19,26 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
def write_config(self):
for device_name, v in self.interface_configurations.items():
+ net_config = 'DHCP'
if isinstance(v, dict):
- self.set_rc_config_value(
- 'ifconfig_' + device_name,
- v.get('address') + ' netmask ' + v.get('netmask'))
- else:
- self.set_rc_config_value('ifconfig_' + device_name, 'DHCP')
+ net_config = v.get('address') + ' netmask ' + v.get('netmask')
+ mtu = v.get('mtu')
+ if mtu:
+ net_config += (' mtu %d' % mtu)
+ self.set_rc_config_value('ifconfig_' + device_name, net_config)
def start_services(self, run=False):
if not run:
LOG.debug("freebsd generate postcmd disabled")
return
+ for dhcp_interface in self.dhcp_interfaces():
+ # Observed on DragonFlyBSD 6. If we use the "restart" parameter,
+ # the routes are not recreated.
+ subp.subp(['service', 'dhclient', 'stop', dhcp_interface],
+ rcs=[0, 1],
+ capture=True)
+
subp.subp(['service', 'netif', 'restart'], capture=True)
# On FreeBSD 10, the restart of routing and dhclient is likely to fail
# because
@@ -41,7 +49,7 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
subp.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1])
for dhcp_interface in self.dhcp_interfaces():
- subp.subp(['service', 'dhclient', 'restart', dhcp_interface],
+ subp.subp(['service', 'dhclient', 'start', dhcp_interface],
rcs=[0, 1],
capture=True)
@@ -56,4 +64,4 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
def available(target=None):
- return util.is_FreeBSD()
+ return util.is_FreeBSD() or util.is_DragonFlyBSD()
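Editor's note: the reworked write_config builds a single rc.conf value, now with an optional mtu suffix fed in by bsd.py's _ifconfig_entries. A sketch of the string it produces (v stands in for one interface_configurations entry):

    v = {'address': '192.0.2.10', 'netmask': '255.255.255.0', 'mtu': 9000}
    net_config = v['address'] + ' netmask ' + v['netmask']
    if v.get('mtu'):
        net_config += ' mtu %d' % v['mtu']
    # Written out as ifconfig_<device>="..." via set_rc_config_value.
    assert net_config == '192.0.2.10 netmask 255.255.255.0 mtu 9000'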
diff --git a/cloudinit/net/netbsd.py b/cloudinit/net/netbsd.py
index 71b38ee6..5f8881a5 100644
--- a/cloudinit/net/netbsd.py
+++ b/cloudinit/net/netbsd.py
@@ -22,9 +22,11 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
)
for device_name, v in self.interface_configurations.items():
if isinstance(v, dict):
- self.set_rc_config_value(
- 'ifconfig_' + device_name,
- v.get('address') + ' netmask ' + v.get('netmask'))
+ net_config = v.get('address') + ' netmask ' + v.get('netmask')
+ mtu = v.get('mtu')
+ if mtu:
+ net_config += (' mtu %d' % mtu)
+ self.set_rc_config_value('ifconfig_' + device_name, net_config)
def start_services(self, run=False):
if not run:
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 53347c83..41acf963 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -4,7 +4,12 @@ import copy
import os
from . import renderer
-from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2, IPV6_DYNAMIC_TYPES
+from .network_state import (
+ NetworkState,
+ subnet_is_ipv6,
+ NET_CONFIG_TO_V2,
+ IPV6_DYNAMIC_TYPES,
+)
from cloudinit import log as logging
from cloudinit import util
@@ -256,7 +261,7 @@ class Renderer(renderer.Renderer):
os.path.islink(SYS_CLASS_NET + iface)]:
subp.subp(cmd, capture=True)
- def _render_content(self, network_state):
+ def _render_content(self, network_state: NetworkState):
# if content already in netplan format, pass it back
if network_state.version == 2:
@@ -426,4 +431,5 @@ def network_state_to_netplan(network_state, header=None):
contents = renderer._render_content(network_state)
return header + contents
+
# vi: ts=4 expandtab
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index e8bf9e39..95b064f0 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -58,38 +58,6 @@ NET_CONFIG_TO_V2 = {
'bridge_waitport': None}}
-def parse_net_config_data(net_config, skip_broken=True):
- """Parses the config, returns NetworkState object
-
- :param net_config: curtin network config dict
- """
- state = None
- version = net_config.get('version')
- config = net_config.get('config')
- if version == 2:
- # v2 does not have explicit 'config' key so we
- # pass the whole net-config as-is
- config = net_config
-
- if version and config is not None:
- nsi = NetworkStateInterpreter(version=version, config=config)
- nsi.parse_config(skip_broken=skip_broken)
- state = nsi.get_network_state()
-
- return state
-
-
-def parse_net_config(path, skip_broken=True):
- """Parses a curtin network configuration file and
- return network state"""
- ns = None
- net_config = util.read_conf(path)
- if 'network' in net_config:
- ns = parse_net_config_data(net_config.get('network'),
- skip_broken=skip_broken)
- return ns
-
-
def from_state_file(state_file):
state = util.read_conf(state_file)
nsi = NetworkStateInterpreter()
@@ -237,6 +205,7 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
self._network_state = copy.deepcopy(self.initial_network_state)
self._network_state['config'] = config
self._parsed = False
+ self._interface_dns_map = {}
@property
def network_state(self):
@@ -310,6 +279,21 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
LOG.warning("Skipping invalid command: %s", command,
exc_info=True)
LOG.debug(self.dump_network_state())
+ for interface, dns in self._interface_dns_map.items():
+ iface = None
+ try:
+ iface = self._network_state['interfaces'][interface]
+ except KeyError as e:
+ raise ValueError(
+ 'Nameserver specified for interface {0}, '
+ 'but interface {0} does not exist!'.format(interface)
+ ) from e
+ if iface:
+ nameservers, search = dns
+ iface['dns'] = {
+ 'addresses': nameservers,
+ 'search': search,
+ }
def parse_config_v2(self, skip_broken=True):
for command_type, command in self._config.items():
@@ -526,21 +510,40 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
def handle_infiniband(self, command):
self.handle_physical(command)
- @ensure_command_keys(['address'])
- def handle_nameserver(self, command):
- dns = self._network_state.get('dns')
+ def _parse_dns(self, command):
+ nameservers = []
+ search = []
if 'address' in command:
addrs = command['address']
if not type(addrs) == list:
addrs = [addrs]
for addr in addrs:
- dns['nameservers'].append(addr)
+ nameservers.append(addr)
if 'search' in command:
paths = command['search']
if not isinstance(paths, list):
paths = [paths]
for path in paths:
- dns['search'].append(path)
+ search.append(path)
+ return nameservers, search
+
+ @ensure_command_keys(['address'])
+ def handle_nameserver(self, command):
+ dns = self._network_state.get('dns')
+ nameservers, search = self._parse_dns(command)
+ if 'interface' in command:
+ self._interface_dns_map[command['interface']] = (
+ nameservers, search
+ )
+ else:
+ dns['nameservers'].extend(nameservers)
+ dns['search'].extend(search)
+
+ @ensure_command_keys(['address'])
+ def _handle_individual_nameserver(self, command, iface):
+ _iface = self._network_state.get('interfaces')
+ nameservers, search = self._parse_dns(command)
+ _iface[iface]['dns'] = {'nameservers': nameservers, 'search': search}
@ensure_command_keys(['destination'])
def handle_route(self, command):
@@ -706,16 +709,17 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
def _v2_common(self, cfg):
LOG.debug('v2_common: handling config:\n%s', cfg)
- if 'nameservers' in cfg:
- search = cfg.get('nameservers').get('search', [])
- dns = cfg.get('nameservers').get('addresses', [])
- name_cmd = {'type': 'nameserver'}
- if len(search) > 0:
- name_cmd.update({'search': search})
- if len(dns) > 0:
- name_cmd.update({'addresses': dns})
- LOG.debug('v2(nameserver) -> v1(nameserver):\n%s', name_cmd)
- self.handle_nameserver(name_cmd)
+ for iface, dev_cfg in cfg.items():
+ if 'nameservers' in dev_cfg:
+ search = dev_cfg.get('nameservers').get('search', [])
+ dns = dev_cfg.get('nameservers').get('addresses', [])
+ name_cmd = {'type': 'nameserver'}
+ if len(search) > 0:
+ name_cmd.update({'search': search})
+ if len(dns) > 0:
+ name_cmd.update({'address': dns})
+ self.handle_nameserver(name_cmd)
+ self._handle_individual_nameserver(name_cmd, iface)
def _handle_bond_bridge(self, command, cmd_type=None):
"""Common handler for bond and bridge types"""
@@ -1052,4 +1056,31 @@ def mask_and_ipv4_to_bcast_addr(mask, ip):
return bcast_str
+def parse_net_config_data(net_config, skip_broken=True) -> NetworkState:
+ """Parses the config, returns NetworkState object
+
+ :param net_config: curtin network config dict
+ """
+ state = None
+ version = net_config.get('version')
+ config = net_config.get('config')
+ if version == 2:
+ # v2 does not have explicit 'config' key so we
+ # pass the whole net-config as-is
+ config = net_config
+
+ if version and config is not None:
+ nsi = NetworkStateInterpreter(version=version, config=config)
+ nsi.parse_config(skip_broken=skip_broken)
+ state = nsi.get_network_state()
+
+ if not state:
+ raise RuntimeError(
+ "No valid network_state object created from network config. "
+ "Did you specify the correct version?"
+ )
+
+ return state
+
+
# vi: ts=4 expandtab
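Editor's note: the relocated parse_net_config_data now fails loudly instead of returning None; a missing or unsupported 'version' key is the usual trigger. A sketch, assuming cloud-init is importable:

    from cloudinit.net.network_state import parse_net_config_data

    try:
        parse_net_config_data({'config': []})  # no 'version' key
    except RuntimeError as err:
        print(err)  # "No valid network_state object created ..."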
diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py
new file mode 100644
index 00000000..a311572f
--- /dev/null
+++ b/cloudinit/net/networkd.py
@@ -0,0 +1,259 @@
+#!/usr/bin/env python3
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2021 VMware Inc.
+#
+# Author: Shreenidhi Shedi <yesshedi@gmail.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+
+
+from . import renderer
+from cloudinit import util
+from cloudinit import subp
+from cloudinit import log as logging
+from collections import OrderedDict
+
+LOG = logging.getLogger(__name__)
+
+
+class CfgParser:
+ def __init__(self):
+ self.conf_dict = OrderedDict({
+ 'Match': [],
+ 'Link': [],
+ 'Network': [],
+ 'DHCPv4': [],
+ 'DHCPv6': [],
+ 'Address': [],
+ 'Route': [],
+ })
+
+ def update_section(self, sec, key, val):
+ for k in self.conf_dict.keys():
+ if k == sec:
+ self.conf_dict[k].append(key+'='+str(val))
+ # remove duplicates from list
+ self.conf_dict[k] = list(dict.fromkeys(self.conf_dict[k]))
+ self.conf_dict[k].sort()
+
+ def get_final_conf(self):
+ contents = ''
+ for k, v in self.conf_dict.items():
+ if not v:
+ continue
+ contents += '['+k+']\n'
+ for e in v:
+ contents += e + '\n'
+ contents += '\n'
+
+ return contents
+
+ def dump_data(self, target_fn):
+ if not target_fn:
+ LOG.warning('Target file not given')
+ return
+
+ contents = self.get_final_conf()
+ LOG.debug('Final content: %s', contents)
+ util.write_file(target_fn, contents)
+
+
+class Renderer(renderer.Renderer):
+ """
+ Renders network information in /etc/systemd/network
+
+ This Renderer is currently experimental and doesn't support all the
+ use cases supported by the other renderers yet.
+ """
+
+ def __init__(self, config=None):
+ if not config:
+ config = {}
+ self.resolve_conf_fn = config.get('resolve_conf_fn',
+ '/etc/systemd/resolved.conf')
+ self.network_conf_dir = config.get('network_conf_dir',
+ '/etc/systemd/network/')
+
+ def generate_match_section(self, iface, cfg):
+ sec = 'Match'
+ match_dict = {
+ 'name': 'Name',
+ 'driver': 'Driver',
+ 'mac_address': 'MACAddress'
+ }
+
+ if not iface:
+ return
+
+ for k, v in match_dict.items():
+ if k in iface and iface[k]:
+ cfg.update_section(sec, v, iface[k])
+
+ return iface['name']
+
+ def generate_link_section(self, iface, cfg):
+ sec = 'Link'
+
+ if not iface:
+ return
+
+ if 'mtu' in iface and iface['mtu']:
+ cfg.update_section(sec, 'MTUBytes', iface['mtu'])
+
+ def parse_routes(self, conf, cfg):
+ sec = 'Route'
+ route_cfg_map = {
+ 'gateway': 'Gateway',
+ 'network': 'Destination',
+ 'metric': 'Metric',
+ }
+
+ # prefix is derived using netmask by network_state
+ prefix = ''
+ if 'prefix' in conf:
+ prefix = '/' + str(conf['prefix'])
+
+ for k, v in conf.items():
+ if k not in route_cfg_map:
+ continue
+ if k == 'network':
+ v += prefix
+ cfg.update_section(sec, route_cfg_map[k], v)
+
+ def parse_subnets(self, iface, cfg):
+ dhcp = 'no'
+ sec = 'Network'
+ for e in iface.get('subnets', []):
+ t = e['type']
+ if t == 'dhcp4' or t == 'dhcp':
+ if dhcp == 'no':
+ dhcp = 'ipv4'
+ elif dhcp == 'ipv6':
+ dhcp = 'yes'
+ elif t == 'dhcp6':
+ if dhcp == 'no':
+ dhcp = 'ipv6'
+ elif dhcp == 'ipv4':
+ dhcp = 'yes'
+ if 'routes' in e and e['routes']:
+ for i in e['routes']:
+ self.parse_routes(i, cfg)
+ if 'address' in e:
+ subnet_cfg_map = {
+ 'address': 'Address',
+ 'gateway': 'Gateway',
+ 'dns_nameservers': 'DNS',
+ 'dns_search': 'Domains',
+ }
+ for k, v in e.items():
+ if k == 'address':
+ if 'prefix' in e:
+ v += '/' + str(e['prefix'])
+ cfg.update_section('Address', subnet_cfg_map[k], v)
+ elif k == 'gateway':
+ cfg.update_section('Route', subnet_cfg_map[k], v)
+ elif k == 'dns_nameservers' or k == 'dns_search':
+ cfg.update_section(sec, subnet_cfg_map[k], ' '.join(v))
+
+ cfg.update_section(sec, 'DHCP', dhcp)
+
+ # This is to accommodate extra keys present in VMware config
+ def dhcp_domain(self, d, cfg):
+ for item in ['dhcp4domain', 'dhcp6domain']:
+ if item not in d:
+ continue
+ ret = str(d[item]).casefold()
+ try:
+ ret = util.translate_bool(ret)
+ ret = 'yes' if ret else 'no'
+ except ValueError:
+ if ret != 'route':
+                    LOG.warning('Invalid %s value - %s', item, ret)
+ ret = 'no'
+ if item == 'dhcp4domain':
+ section = 'DHCPv4'
+ else:
+ section = 'DHCPv6'
+ cfg.update_section(section, 'UseDomains', ret)
+
+ def parse_dns(self, iface, cfg, ns):
+ sec = 'Network'
+
+ dns_cfg_map = {
+ 'search': 'Domains',
+ 'nameservers': 'DNS',
+ 'addresses': 'DNS',
+ }
+
+ dns = iface.get('dns')
+ if not dns and ns.version == 1:
+ dns = {
+ 'search': ns.dns_searchdomains,
+ 'nameservers': ns.dns_nameservers,
+ }
+ elif not dns and ns.version == 2:
+ return
+
+ for k, v in dns_cfg_map.items():
+ if k in dns and dns[k]:
+ cfg.update_section(sec, v, ' '.join(dns[k]))
+
+ def create_network_file(self, link, conf, nwk_dir):
+ net_fn_owner = 'systemd-network'
+
+ LOG.debug('Setting Networking Config for %s', link)
+
+ net_fn = nwk_dir + '10-cloud-init-' + link + '.network'
+ util.write_file(net_fn, conf)
+ util.chownbyname(net_fn, net_fn_owner, net_fn_owner)
+
+ def render_network_state(self, network_state, templates=None, target=None):
+ fp_nwkd = self.network_conf_dir
+ if target:
+ fp_nwkd = subp.target_path(target) + fp_nwkd
+
+ util.ensure_dir(os.path.dirname(fp_nwkd))
+
+ ret_dict = self._render_content(network_state)
+ for k, v in ret_dict.items():
+ self.create_network_file(k, v, fp_nwkd)
+
+ def _render_content(self, ns):
+ ret_dict = {}
+ for iface in ns.iter_interfaces():
+ cfg = CfgParser()
+
+ link = self.generate_match_section(iface, cfg)
+ self.generate_link_section(iface, cfg)
+ self.parse_subnets(iface, cfg)
+ self.parse_dns(iface, cfg, ns)
+
+ for route in ns.iter_routes():
+ self.parse_routes(route, cfg)
+
+ if ns.version == 2:
+ name = iface['name']
+ # network state doesn't give dhcp domain info
+ # using ns.config as a workaround here
+ self.dhcp_domain(ns.config['ethernets'][name], cfg)
+
+ ret_dict.update({link: cfg.get_final_conf()})
+
+ return ret_dict
+
+
+def available(target=None):
+ expected = ['ip', 'systemctl']
+ search = ['/usr/bin', '/bin']
+ for p in expected:
+ if not subp.which(p, search=search, target=target):
+ return False
+ return True
+
+
+def network_state_to_networkd(ns):
+ renderer = Renderer({})
+ return renderer._render_content(ns)
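+
+
+# A usage sketch (not part of the module), assuming `ns` was produced by
+# cloudinit.net.network_state.parse_net_config_data:
+#   for link, conf in network_state_to_networkd(ns).items():
+#       print('10-cloud-init-%s.network' % link)
+#       print(conf)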
diff --git a/cloudinit/net/openbsd.py b/cloudinit/net/openbsd.py
index 166d77e6..d87d8a4f 100644
--- a/cloudinit/net/openbsd.py
+++ b/cloudinit/net/openbsd.py
@@ -18,7 +18,7 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
content = 'dhcp\n'
elif isinstance(v, dict):
try:
- content = "inet {address} {netmask}\n".format(
+ content = "inet {address} {netmask}".format(
address=v['address'],
netmask=v['netmask']
)
@@ -26,12 +26,19 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
LOG.error(
"Invalid static configuration for %s",
device_name)
+ mtu = v.get("mtu")
+ if mtu:
+ content += (' mtu %d' % mtu)
+ content += "\n"
util.write_file(fn, content)
def start_services(self, run=False):
if not self._postcmds:
LOG.debug("openbsd generate postcmd disabled")
return
+ subp.subp(['pkill', 'dhclient'], capture=True, rcs=[0, 1])
+ subp.subp(['route', 'del', 'default'], capture=True, rcs=[0, 1])
+ subp.subp(['route', 'flush', 'default'], capture=True, rcs=[0, 1])
subp.subp(['sh', '/etc/netstart'], capture=True)
def set_route(self, network, netmask, gateway):
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
index 2a61a7a8..27447bc2 100644
--- a/cloudinit/net/renderer.py
+++ b/cloudinit/net/renderer.py
@@ -28,6 +28,8 @@ filter_by_physical = filter_by_type('physical')
class Renderer(object):
+ def __init__(self, config=None):
+ pass
@staticmethod
def _render_persistent_net(network_state):
diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py
index e2de4d55..822b45de 100644
--- a/cloudinit/net/renderers.py
+++ b/cloudinit/net/renderers.py
@@ -1,9 +1,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from typing import List, Tuple, Type
+
from . import eni
from . import freebsd
from . import netbsd
from . import netplan
+from . import networkd
+from . import renderer
from . import RendererNotFoundError
from . import openbsd
from . import sysconfig
@@ -13,15 +17,18 @@ NAME_TO_RENDERER = {
"freebsd": freebsd,
"netbsd": netbsd,
"netplan": netplan,
+ "networkd": networkd,
"openbsd": openbsd,
"sysconfig": sysconfig,
}
DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd",
- "netbsd", "openbsd"]
+ "netbsd", "openbsd", "networkd"]
-def search(priority=None, target=None, first=False):
+def search(
+ priority=None, target=None, first=False
+) -> List[Tuple[str, Type[renderer.Renderer]]]:
if priority is None:
priority = DEFAULT_PRIORITY
@@ -38,13 +45,13 @@ def search(priority=None, target=None, first=False):
if render_mod.available(target):
cur = (name, render_mod.Renderer)
if first:
- return cur
+ return [cur]
found.append(cur)
return found
-def select(priority=None, target=None):
+def select(priority=None, target=None) -> Tuple[str, Type[renderer.Renderer]]:
found = search(priority, target=target, first=True)
if not found:
if priority is None:
@@ -55,6 +62,6 @@ def select(priority=None, target=None):
raise RendererNotFoundError(
"No available network renderers found%s. Searched "
"through list: %s" % (tmsg, priority))
- return found
+ return found[0]
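+
+# A usage sketch of the narrowed return type, assuming the networkd
+# renderer is available on the target:
+#   name, renderer_cls = select(priority=['networkd'])
+#   renderer = renderer_cls(config={})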
# vi: ts=4 expandtab
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 089b44b2..06f7255e 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -18,8 +18,9 @@ from .network_state import (
is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6, IPV6_DYNAMIC_TYPES)
LOG = logging.getLogger(__name__)
+KNOWN_DISTROS = ['almalinux', 'centos', 'eurolinux', 'fedora', 'rhel', 'rocky',
+ 'suse', 'virtuozzo']
NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf"
-KNOWN_DISTROS = ['almalinux', 'centos', 'fedora', 'rhel', 'suse']
def _make_header(sep='#'):
@@ -931,7 +932,9 @@ class Renderer(renderer.Renderer):
netrules_path = subp.target_path(target, self.netrules_path)
util.write_file(netrules_path, netrules_content, file_mode)
if available_nm(target=target):
- enable_ifcfg_rh(subp.target_path(target, path=NM_CFG_FILE))
+ enable_ifcfg_rh(subp.target_path(
+ target, path=NM_CFG_FILE
+ ))
sysconfig_path = subp.target_path(target, templates.get('control'))
# Distros configuring /etc/sysconfig/network as a file e.g. Centos
@@ -978,7 +981,10 @@ def available_sysconfig(target=None):
def available_nm(target=None):
- if not os.path.isfile(subp.target_path(target, path=NM_CFG_FILE)):
+ if not os.path.isfile(subp.target_path(
+ target,
+ path=NM_CFG_FILE
+ )):
return False
return True
diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py
index 6f9a02de..5ae048e2 100644
--- a/cloudinit/net/tests/test_dhcp.py
+++ b/cloudinit/net/tests/test_dhcp.py
@@ -42,6 +42,7 @@ class TestParseDHCPLeasesFile(CiTestCase):
lease {
interface "wlp3s0";
fixed-address 192.168.2.74;
+ filename "http://192.168.2.50/boot.php?mac=${netX}";
option subnet-mask 255.255.255.0;
option routers 192.168.2.1;
renew 4 2017/07/27 18:02:30;
@@ -50,6 +51,7 @@ class TestParseDHCPLeasesFile(CiTestCase):
lease {
interface "wlp3s0";
fixed-address 192.168.2.74;
+ filename "http://192.168.2.50/boot.php?mac=${netX}";
option subnet-mask 255.255.255.0;
option routers 192.168.2.1;
}
@@ -58,8 +60,10 @@ class TestParseDHCPLeasesFile(CiTestCase):
{'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1',
'renew': '4 2017/07/27 18:02:30',
- 'expire': '5 2017/07/28 07:08:15'},
+ 'expire': '5 2017/07/28 07:08:15',
+ 'filename': 'http://192.168.2.50/boot.php?mac=${netX}'},
{'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
+ 'filename': 'http://192.168.2.50/boot.php?mac=${netX}',
'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}]
write_file(lease_file, content)
self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py
index 07d726e2..84e8308a 100644
--- a/cloudinit/net/tests/test_network_state.py
+++ b/cloudinit/net/tests/test_network_state.py
@@ -2,12 +2,62 @@
from unittest import mock
+import pytest
+
+from cloudinit import safeyaml
from cloudinit.net import network_state
from cloudinit.tests.helpers import CiTestCase
netstate_path = 'cloudinit.net.network_state'
+_V1_CONFIG_NAMESERVERS = """\
+network:
+ version: 1
+ config:
+ - type: nameserver
+ interface: {iface}
+ address:
+ - 192.168.1.1
+ - 8.8.8.8
+ search:
+ - spam.local
+ - type: nameserver
+ address:
+ - 192.168.1.0
+ - 4.4.4.4
+ search:
+ - eggs.local
+ - type: physical
+ name: eth0
+ mac_address: '00:11:22:33:44:55'
+ - type: physical
+ name: eth1
+ mac_address: '66:77:88:99:00:11'
+"""
+
+V1_CONFIG_NAMESERVERS_VALID = _V1_CONFIG_NAMESERVERS.format(iface='eth1')
+V1_CONFIG_NAMESERVERS_INVALID = _V1_CONFIG_NAMESERVERS.format(iface='eth90')
+
+V2_CONFIG_NAMESERVERS = """\
+network:
+ version: 2
+ ethernets:
+ eth0:
+ match:
+ macaddress: '00:11:22:33:44:55'
+ nameservers:
+ search: [spam.local, eggs.local]
+ addresses: [8.8.8.8]
+ eth1:
+ match:
+ macaddress: '66:77:88:99:00:11'
+ nameservers:
+ search: [foo.local, bar.local]
+ addresses: [4.4.4.4]
+"""
+
+
class TestNetworkStateParseConfig(CiTestCase):
def setUp(self):
@@ -17,11 +67,13 @@ class TestNetworkStateParseConfig(CiTestCase):
def test_missing_version_returns_none(self):
ncfg = {}
- self.assertEqual(None, network_state.parse_net_config_data(ncfg))
+ with self.assertRaises(RuntimeError):
+ network_state.parse_net_config_data(ncfg)
def test_unknown_versions_returns_none(self):
ncfg = {'version': 13.2}
- self.assertEqual(None, network_state.parse_net_config_data(ncfg))
+ with self.assertRaises(RuntimeError):
+ network_state.parse_net_config_data(ncfg)
def test_version_2_passes_self_as_config(self):
ncfg = {'version': 2, 'otherconfig': {}, 'somemore': [1, 2, 3]}
@@ -55,4 +107,57 @@ class TestNetworkStateParseConfigV2(CiTestCase):
self.assertEqual(ncfg, nsi.as_dict()['config'])
+class TestNetworkStateParseNameservers:
+ def _parse_network_state_from_config(self, config):
+ yaml = safeyaml.load(config)
+ return network_state.parse_net_config_data(yaml['network'])
+
+ def test_v1_nameservers_valid(self):
+ config = self._parse_network_state_from_config(
+ V1_CONFIG_NAMESERVERS_VALID)
+
+ # If an interface was specified, DNS shouldn't be in the global list
+ assert ['192.168.1.0', '4.4.4.4'] == sorted(
+ config.dns_nameservers)
+ assert ['eggs.local'] == config.dns_searchdomains
+
+ # If an interface was specified, DNS should be part of the interface
+ for iface in config.iter_interfaces():
+ if iface['name'] == 'eth1':
+ assert iface['dns']['addresses'] == ['192.168.1.1', '8.8.8.8']
+ assert iface['dns']['search'] == ['spam.local']
+ else:
+ assert 'dns' not in iface
+
+ def test_v1_nameservers_invalid(self):
+ with pytest.raises(ValueError):
+ self._parse_network_state_from_config(
+ V1_CONFIG_NAMESERVERS_INVALID)
+
+ def test_v2_nameservers(self):
+ config = self._parse_network_state_from_config(V2_CONFIG_NAMESERVERS)
+
+ # Ensure DNS defined on interface exists on interface
+ for iface in config.iter_interfaces():
+ if iface['name'] == 'eth0':
+ assert iface['dns'] == {
+ 'nameservers': ['8.8.8.8'],
+ 'search': ['spam.local', 'eggs.local'],
+ }
+ else:
+ assert iface['dns'] == {
+ 'nameservers': ['4.4.4.4'],
+ 'search': ['foo.local', 'bar.local']
+ }
+
+ # Ensure DNS defined on interface also exists globally (since there
+ # is no global DNS definitions in v2)
+ assert ['4.4.4.4', '8.8.8.8'] == sorted(config.dns_nameservers)
+ assert [
+ 'bar.local',
+ 'eggs.local',
+ 'foo.local',
+ 'spam.local',
+ ] == sorted(config.dns_searchdomains)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/patcher.py b/cloudinit/patcher.py
index 2df9441a..186d8ad8 100644
--- a/cloudinit/patcher.py
+++ b/cloudinit/patcher.py
@@ -6,7 +6,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import imp
import logging
import sys
@@ -20,7 +19,7 @@ class QuietStreamHandler(logging.StreamHandler):
pass
-def _patch_logging():
+def patch_logging():
# Replace 'handleError' with one that will be more
# tolerant of errors in that it can avoid
# re-notifying on exceptions and when errors
@@ -37,12 +36,4 @@ def _patch_logging():
pass
setattr(logging.Handler, 'handleError', handleError)
-
-def patch():
- imp.acquire_lock()
- try:
- _patch_logging()
- finally:
- imp.release_lock()
-
# vi: ts=4 expandtab
diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
index b8677c8b..9afad747 100644
--- a/cloudinit/reporting/events.py
+++ b/cloudinit/reporting/events.py
@@ -165,7 +165,15 @@ class ReportEventStack(object):
:param result_on_exception:
The result value to set if an exception is caught. default
value is FAIL.
+
+ :param post_files:
+ Filepaths of files to post when the event closes, for example
+ success or failure information recorded in a log file. Each
+ filepath that refers to a valid regular file is read and encoded
+ as base64 at the close of the event. Defaults to an empty list
+ when None.
"""
+
def __init__(self, name, description, message=None, parent=None,
reporting_enabled=None, result_on_exception=status.FAIL,
post_files=None):
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 23e4c0ad..f69005ea 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -43,6 +43,7 @@ CFG_BUILTIN = {
'Exoscale',
'RbxCloud',
'UpCloud',
+ 'VMware',
# At the end to act as a 'catch' when none of the above work...
'None',
],
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 2f3390c3..fddfe363 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -22,7 +22,7 @@ import requests
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import net
-from cloudinit.event import EventType
+from cloudinit.event import EventScope, EventType
from cloudinit.net import device_driver
from cloudinit.net.dhcp import EphemeralDHCPv4
from cloudinit import sources
@@ -45,7 +45,8 @@ from cloudinit.sources.helpers.azure import (
is_byte_swapped,
dhcp_log_cb,
push_log_to_kvp,
- report_failure_to_fabric)
+ report_failure_to_fabric,
+ build_minimal_ovf)
LOG = logging.getLogger(__name__)
@@ -76,7 +77,7 @@ REPROVISION_NIC_ATTACH_MARKER_FILE = "/var/lib/cloud/data/wait_for_nic_attach"
REPROVISION_NIC_DETACHED_MARKER_FILE = "/var/lib/cloud/data/nic_detached"
REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
AGENT_SEED_DIR = '/var/lib/waagent'
-
+DEFAULT_PROVISIONING_ISO_DEV = '/dev/sr0'
# In the event where the IMDS primary server is not
# available, it takes 1s to fallback to the secondary one
@@ -338,6 +339,13 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
class DataSourceAzure(sources.DataSource):
dsname = 'Azure'
+ # Regenerate network config on new-instance boot and on every boot
+ default_update_events = {EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY
+ }}
+
_negotiated = False
_metadata_imds = sources.UNSET
_ci_pkl_version = 1
@@ -352,8 +360,6 @@ class DataSourceAzure(sources.DataSource):
BUILTIN_DS_CONFIG])
self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
self._network_config = None
- # Regenerate network config new_instance boot and every boot
- self.update_events['network'].add(EventType.BOOT)
self._ephemeral_dhcp_ctx = None
self.failed_desired_api_version = False
self.iso_dev = None
@@ -423,148 +429,154 @@ class DataSourceAzure(sources.DataSource):
# it determines the value of ret. More specifically, the first one in
# the candidate list determines the path to take in order to get the
# metadata we need.
- candidates = [self.seed_dir]
+ reprovision = False
+ ovf_is_accessible = True
+ reprovision_after_nic_attach = False
+ metadata_source = None
+ ret = None
if os.path.isfile(REPROVISION_MARKER_FILE):
- candidates.insert(0, "IMDS")
+ reprovision = True
+ metadata_source = "IMDS"
report_diagnostic_event("Reprovision marker file already present "
"before crawling Azure metadata: %s" %
REPROVISION_MARKER_FILE,
logger_func=LOG.debug)
elif os.path.isfile(REPROVISION_NIC_ATTACH_MARKER_FILE):
- candidates.insert(0, "NIC_ATTACH_MARKER_PRESENT")
+ reprovision_after_nic_attach = True
+ metadata_source = "NIC_ATTACH_MARKER_PRESENT"
report_diagnostic_event("Reprovision nic attach marker file "
"already present before crawling Azure "
"metadata: %s" %
REPROVISION_NIC_ATTACH_MARKER_FILE,
logger_func=LOG.debug)
- candidates.extend(list_possible_azure_ds_devs())
- if ddir:
- candidates.append(ddir)
-
- found = None
- reprovision = False
- ovf_is_accessible = True
- reprovision_after_nic_attach = False
- for cdev in candidates:
- try:
- LOG.debug("cdev: %s", cdev)
- if cdev == "IMDS":
- ret = None
- reprovision = True
- elif cdev == "NIC_ATTACH_MARKER_PRESENT":
- ret = None
- reprovision_after_nic_attach = True
- elif cdev.startswith("/dev/"):
- if util.is_FreeBSD():
- ret = util.mount_cb(cdev, load_azure_ds_dir,
- mtype="udf")
+ else:
+ for src in list_possible_azure_ds(self.seed_dir, ddir):
+ try:
+ if src.startswith("/dev/"):
+ if util.is_FreeBSD():
+ ret = util.mount_cb(src, load_azure_ds_dir,
+ mtype="udf")
+ else:
+ ret = util.mount_cb(src, load_azure_ds_dir)
+ # save the device for ejection later
+ self.iso_dev = src
+ ovf_is_accessible = True
else:
- ret = util.mount_cb(cdev, load_azure_ds_dir)
- else:
- ret = load_azure_ds_dir(cdev)
-
- except NonAzureDataSource:
- report_diagnostic_event(
- "Did not find Azure data source in %s" % cdev,
- logger_func=LOG.debug)
- continue
- except BrokenAzureDataSource as exc:
- msg = 'BrokenAzureDataSource: %s' % exc
- report_diagnostic_event(msg, logger_func=LOG.error)
- raise sources.InvalidMetaDataException(msg)
- except util.MountFailedError:
- report_diagnostic_event(
- '%s was not mountable' % cdev, logger_func=LOG.debug)
- cdev = 'IMDS'
- ovf_is_accessible = False
- empty_md = {'local-hostname': ''}
- empty_cfg = dict(
- system_info=dict(
- default_user=dict(
- name=''
+ ret = load_azure_ds_dir(src)
+ metadata_source = src
+ break
+ except NonAzureDataSource:
+ report_diagnostic_event(
+ "Did not find Azure data source in %s" % src,
+ logger_func=LOG.debug)
+ continue
+ except util.MountFailedError:
+ report_diagnostic_event(
+ '%s was not mountable' % src,
+ logger_func=LOG.debug)
+ ovf_is_accessible = False
+ empty_md = {'local-hostname': ''}
+ empty_cfg = dict(
+ system_info=dict(
+ default_user=dict(
+ name=''
+ )
)
)
- )
- ret = (empty_md, '', empty_cfg, {})
-
- report_diagnostic_event("Found provisioning metadata in %s" % cdev,
- logger_func=LOG.debug)
+ ret = (empty_md, '', empty_cfg, {})
+ metadata_source = 'IMDS'
+ continue
+ except BrokenAzureDataSource as exc:
+ msg = 'BrokenAzureDataSource: %s' % exc
+ report_diagnostic_event(msg, logger_func=LOG.error)
+ raise sources.InvalidMetaDataException(msg)
- # save the iso device for ejection before reporting ready
- if cdev.startswith("/dev"):
- self.iso_dev = cdev
+ report_diagnostic_event(
+ "Found provisioning metadata in %s" % metadata_source,
+ logger_func=LOG.debug)
- perform_reprovision = reprovision or self._should_reprovision(ret)
- perform_reprovision_after_nic_attach = (
- reprovision_after_nic_attach or
- self._should_reprovision_after_nic_attach(ret))
+ perform_reprovision = reprovision or self._should_reprovision(ret)
+ perform_reprovision_after_nic_attach = (
+ reprovision_after_nic_attach or
+ self._should_reprovision_after_nic_attach(ret))
- if perform_reprovision or perform_reprovision_after_nic_attach:
- if util.is_FreeBSD():
- msg = "Free BSD is not supported for PPS VMs"
- report_diagnostic_event(msg, logger_func=LOG.error)
- raise sources.InvalidMetaDataException(msg)
- if perform_reprovision_after_nic_attach:
- self._wait_for_all_nics_ready()
- ret = self._reprovision()
+ if perform_reprovision or perform_reprovision_after_nic_attach:
+ if util.is_FreeBSD():
+ msg = "Free BSD is not supported for PPS VMs"
+ report_diagnostic_event(msg, logger_func=LOG.error)
+ raise sources.InvalidMetaDataException(msg)
+ if perform_reprovision_after_nic_attach:
+ self._wait_for_all_nics_ready()
+ ret = self._reprovision()
- imds_md = self.get_imds_data_with_api_fallback(
- self.fallback_interface,
- retries=10
+ imds_md = self.get_imds_data_with_api_fallback(
+ self.fallback_interface,
+ retries=10
+ )
+ if not imds_md and not ovf_is_accessible:
+ msg = 'No OVF or IMDS available'
+ report_diagnostic_event(msg)
+ raise sources.InvalidMetaDataException(msg)
+ (md, userdata_raw, cfg, files) = ret
+ self.seed = metadata_source
+ crawled_data.update({
+ 'cfg': cfg,
+ 'files': files,
+ 'metadata': util.mergemanydict(
+ [md, {'imds': imds_md}]),
+ 'userdata_raw': userdata_raw})
+ imds_username = _username_from_imds(imds_md)
+ imds_hostname = _hostname_from_imds(imds_md)
+ imds_disable_password = _disable_password_from_imds(imds_md)
+ if imds_username:
+ LOG.debug('Username retrieved from IMDS: %s', imds_username)
+ cfg['system_info']['default_user']['name'] = imds_username
+ if imds_hostname:
+ LOG.debug('Hostname retrieved from IMDS: %s', imds_hostname)
+ crawled_data['metadata']['local-hostname'] = imds_hostname
+ if imds_disable_password:
+ LOG.debug(
+ 'Disable password retrieved from IMDS: %s',
+ imds_disable_password
)
- if not imds_md and not ovf_is_accessible:
- msg = 'No OVF or IMDS available'
- report_diagnostic_event(msg)
- raise sources.InvalidMetaDataException(msg)
- (md, userdata_raw, cfg, files) = ret
- self.seed = cdev
- crawled_data.update({
- 'cfg': cfg,
- 'files': files,
- 'metadata': util.mergemanydict(
- [md, {'imds': imds_md}]),
- 'userdata_raw': userdata_raw})
- imds_username = _username_from_imds(imds_md)
- imds_hostname = _hostname_from_imds(imds_md)
- imds_disable_password = _disable_password_from_imds(imds_md)
- if imds_username:
- LOG.debug('Username retrieved from IMDS: %s', imds_username)
- cfg['system_info']['default_user']['name'] = imds_username
- if imds_hostname:
- LOG.debug('Hostname retrieved from IMDS: %s', imds_hostname)
- crawled_data['metadata']['local-hostname'] = imds_hostname
- if imds_disable_password:
- LOG.debug(
- 'Disable password retrieved from IMDS: %s',
- imds_disable_password
- )
- crawled_data['metadata']['disable_password'] = imds_disable_password # noqa: E501
-
- # only use userdata from imds if OVF did not provide custom data
- # userdata provided by IMDS is always base64 encoded
- if not userdata_raw:
- imds_userdata = _userdata_from_imds(imds_md)
- if imds_userdata:
- LOG.debug("Retrieved userdata from IMDS")
- try:
- crawled_data['userdata_raw'] = base64.b64decode(
- ''.join(imds_userdata.split()))
- except Exception:
- report_diagnostic_event(
- "Bad userdata in IMDS",
- logger_func=LOG.warning)
- found = cdev
+ crawled_data['metadata']['disable_password'] = imds_disable_password # noqa: E501
- report_diagnostic_event(
- 'found datasource in %s' % cdev, logger_func=LOG.debug)
- break
+ if metadata_source == 'IMDS' and not crawled_data['files']:
+ try:
+ contents = build_minimal_ovf(
+ username=imds_username,
+ hostname=imds_hostname,
+ disableSshPwd=imds_disable_password)
+ crawled_data['files'] = {'ovf-env.xml': contents}
+ except Exception as e:
+ report_diagnostic_event(
+ "Failed to construct OVF from IMDS data %s" % e,
+ logger_func=LOG.debug)
+
+ # only use userdata from imds if OVF did not provide custom data
+ # userdata provided by IMDS is always base64 encoded
+ if not userdata_raw:
+ imds_userdata = _userdata_from_imds(imds_md)
+ if imds_userdata:
+ LOG.debug("Retrieved userdata from IMDS")
+ try:
+ crawled_data['userdata_raw'] = base64.b64decode(
+ ''.join(imds_userdata.split()))
+ except Exception:
+ report_diagnostic_event(
+ "Bad userdata in IMDS",
+ logger_func=LOG.warning)
- if not found:
+ if not metadata_source:
msg = 'No Azure metadata found'
report_diagnostic_event(msg, logger_func=LOG.error)
raise sources.InvalidMetaDataException(msg)
+ else:
+ report_diagnostic_event(
+ 'found datasource in %s' % metadata_source,
+ logger_func=LOG.debug)
- if found == ddir:
+ if metadata_source == ddir:
report_diagnostic_event(
"using files cached in %s" % ddir, logger_func=LOG.debug)
@@ -880,12 +892,12 @@ class DataSourceAzure(sources.DataSource):
logger_func=LOG.info)
return
- LOG.info("Attempting to bring %s up", ifname)
+ LOG.debug("Attempting to bring %s up", ifname)
attempts = 0
+ LOG.info("Unbinding and binding the interface %s", ifname)
while True:
- LOG.info("Unbinding and binding the interface %s", ifname)
devicename = net.read_sys_net(ifname,
'device/device_id').strip('{}')
util.write_file('/sys/bus/vmbus/drivers/hv_netvsc/unbind',
@@ -900,16 +912,28 @@ class DataSourceAzure(sources.DataSource):
report_diagnostic_event(msg, logger_func=LOG.info)
return
- sleep_duration = 1
- msg = ("Link is not up after %d attempts with %d seconds sleep "
- "between attempts." % (attempts, sleep_duration))
-
if attempts % 10 == 0:
+ msg = ("Link is not up after %d attempts to rebind" % attempts)
report_diagnostic_event(msg, logger_func=LOG.info)
- else:
LOG.info(msg)
- sleep(sleep_duration)
+ # It could take some time after rebind for the interface to be up.
+ # So poll for the status for some time before attempting to rebind
+ # again.
+ sleep_duration = 0.5
+ max_status_polls = 20
+ LOG.debug("Polling %d seconds for primary NIC link up after "
+ "rebind.", sleep_duration * max_status_polls)
+
+ for i in range(0, max_status_polls):
+ if self.distro.networking.is_up(ifname):
+ msg = ("After %d attempts to rebind, link is up after "
+ "polling the link status %d times" % (attempts, i))
+ report_diagnostic_event(msg, logger_func=LOG.info)
+ LOG.debug(msg)
+ return
+ else:
+ sleep(sleep_duration)
@azure_ds_telemetry_reporter
def _create_report_ready_marker(self):
@@ -960,7 +984,7 @@ class DataSourceAzure(sources.DataSource):
imds_md = None
metadata_poll_count = 0
metadata_logging_threshold = 1
- metadata_timeout_count = 0
+ expected_errors_count = 0
# For now, only a VM's primary NIC can contact IMDS and WireServer. If
# DHCP fails for a NIC, we have no mechanism to determine if the NIC is
@@ -986,13 +1010,16 @@ class DataSourceAzure(sources.DataSource):
raise
# Retry polling network metadata for a limited duration only when the
- # calls fail due to timeout. This is because the platform drops packets
- # going towards IMDS when it is not a primary nic. If the calls fail
- # due to other issues like 410, 503 etc, then it means we are primary
- # but IMDS service is unavailable at the moment. Retry indefinitely in
- # those cases since we cannot move on without the network metadata.
+ # calls fail due to network unreachable error or timeout.
+ # This is because the platform drops packets going towards IMDS
+ # when it is not a primary nic. If the calls fail due to other issues
+ # like 410, 503 etc, then it means we are primary but IMDS service
+ # is unavailable at the moment. Retry indefinitely in those cases
+ since we cannot move on without the network metadata. In the future,
+ none of this will be necessary, as a new DHCP option will indicate
+ whether the nic is primary.
def network_metadata_exc_cb(msg, exc):
- nonlocal metadata_timeout_count, metadata_poll_count
+ nonlocal expected_errors_count, metadata_poll_count
nonlocal metadata_logging_threshold
metadata_poll_count = metadata_poll_count + 1
@@ -1012,9 +1039,13 @@ class DataSourceAzure(sources.DataSource):
(msg, exc.cause, exc.code),
logger_func=LOG.error)
- if exc.cause and isinstance(exc.cause, requests.Timeout):
- metadata_timeout_count = metadata_timeout_count + 1
- return (metadata_timeout_count <= 10)
+ # Retry up to a certain limit for both timeout and network
+ # unreachable errors.
+ if exc.cause and isinstance(
+ exc.cause, (requests.Timeout, requests.ConnectionError)
+ ):
+ expected_errors_count = expected_errors_count + 1
+ return (expected_errors_count <= 10)
return True
# Primary nic detection will be optimized in the future. The fact that
@@ -2079,18 +2110,18 @@ def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE):
@azure_ds_telemetry_reporter
-def list_possible_azure_ds_devs():
- devlist = []
+def list_possible_azure_ds(seed, cache_dir):
+ yield seed
+ yield DEFAULT_PROVISIONING_ISO_DEV
if util.is_FreeBSD():
cdrom_dev = "/dev/cd0"
if _check_freebsd_cdrom(cdrom_dev):
- return [cdrom_dev]
+ yield cdrom_dev
else:
for fstype in ("iso9660", "udf"):
- devlist.extend(util.find_devs_with("TYPE=%s" % fstype))
-
- devlist.sort(reverse=True)
- return devlist
+ yield from util.find_devs_with("TYPE=%s" % fstype)
+ if cache_dir:
+ yield cache_dir
@azure_ds_telemetry_reporter
@@ -2309,8 +2340,8 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
LOG.info(
'Removing Ubuntu extended network scripts because'
' cloud-init updates Azure network configuration on the'
- ' following event: %s.',
- EventType.BOOT)
+ ' following events: %s.',
+ [EventType.BOOT.value, EventType.BOOT_LEGACY.value])
logged = True
if os.path.isdir(path):
util.del_dir(path)
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 62756cf7..19c8d126 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -12,9 +12,8 @@ from cloudinit import log as logging
from cloudinit import sources
from cloudinit import subp
from cloudinit import util
-
+from cloudinit.event import EventScope, EventType
from cloudinit.net import eni
-
from cloudinit.sources.DataSourceIBMCloud import get_ibm_platform
from cloudinit.sources.helpers import openstack
@@ -37,6 +36,13 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
dsname = 'ConfigDrive'
+ supported_update_events = {EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }}
+
def __init__(self, sys_cfg, distro, paths):
super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
self.source = None
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 5040ce5b..08805d99 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -54,7 +54,7 @@ class DataSourceDigitalOcean(sources.DataSource):
if not is_do:
return False
- LOG.info("Running on digital ocean. droplet_id=%s", droplet_id)
+ LOG.info("Running on DigitalOcean. droplet_id=%s", droplet_id)
ipv4LL_nic = None
if self.use_ip4LL:
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index a2105dc7..700437b0 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -8,6 +8,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+import copy
import os
import time
@@ -20,7 +21,7 @@ from cloudinit import sources
from cloudinit import url_helper as uhelp
from cloudinit import util
from cloudinit import warnings
-from cloudinit.event import EventType
+from cloudinit.event import EventScope, EventType
LOG = logging.getLogger(__name__)
@@ -75,6 +76,13 @@ class DataSourceEc2(sources.DataSource):
# Whether we want to get network configuration from the metadata service.
perform_dhcp_setup = False
+ supported_update_events = {EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }}
+
def __init__(self, sys_cfg, distro, paths):
super(DataSourceEc2, self).__init__(sys_cfg, distro, paths)
self.metadata_address = None
@@ -426,7 +434,12 @@ class DataSourceEc2(sources.DataSource):
# Non-VPC (aka Classic) Ec2 instances need to rewrite the
# network config file every boot due to MAC address change.
if self.is_classic_instance():
- self.update_events['network'].add(EventType.BOOT)
+ self.default_update_events = copy.deepcopy(
+ self.default_update_events)
+ self.default_update_events[EventScope.NETWORK].add(
+ EventType.BOOT)
+ self.default_update_events[EventScope.NETWORK].add(
+ EventType.BOOT_LEGACY)
else:
LOG.warning("Metadata 'network' key not valid: %s.", net_md)
self._network_config = result
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index bbeada0b..e909f058 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -98,10 +98,21 @@ class DataSourceOVF(sources.DataSource):
found.append(seed)
elif system_type and 'vmware' in system_type.lower():
LOG.debug("VMware Virtualization Platform found")
+ allow_vmware_cust = False
+ allow_raw_data = False
if not self.vmware_customization_supported:
LOG.debug("Skipping the check for "
"VMware Customization support")
else:
+ allow_vmware_cust = not util.get_cfg_option_bool(
+ self.sys_cfg, "disable_vmware_customization", True)
+ allow_raw_data = util.get_cfg_option_bool(
+ self.ds_cfg, "allow_raw_data", True)
+
+ if not (allow_vmware_cust or allow_raw_data):
+ LOG.debug(
+ "Customization for VMware platform is disabled.")
+ else:
search_paths = (
"/usr/lib/vmware-tools", "/usr/lib64/vmware-tools",
"/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools")
@@ -148,19 +159,21 @@ class DataSourceOVF(sources.DataSource):
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
self._vmware_cust_conf)
- else:
- LOG.debug("Did not find VMware Customization Config File")
-
- # Honor disable_vmware_customization setting on metadata absent
- if not md_path:
- if util.get_cfg_option_bool(self.sys_cfg,
- "disable_vmware_customization",
- True):
+ # Don't handle customization in the following two cases:
+ # 1. metadata is found but allow_raw_data is False.
+ # 2. no metadata is found and allow_vmware_cust is False.
+ if md_path and not allow_raw_data:
LOG.debug(
- "Customization for VMware platform is disabled.")
+ "Customization using raw data is disabled.")
# reset vmwareImcConfigFilePath to None to avoid
# customization for VMware platform
vmwareImcConfigFilePath = None
+ if md_path is None and not allow_vmware_cust:
+ LOG.debug(
+ "Customization using VMware config is disabled.")
+ vmwareImcConfigFilePath = None
+ else:
+ LOG.debug("Did not find VMware Customization Config File")
use_raw_data = bool(vmwareImcConfigFilePath and md_path)
if use_raw_data:
@@ -345,8 +358,11 @@ class DataSourceOVF(sources.DataSource):
if contents:
break
if contents:
- (md, ud, cfg) = read_ovf_environment(contents)
+ read_network = ('com.vmware.guestinfo' == name)
+ (md, ud, cfg) = read_ovf_environment(contents, read_network)
self.environment = contents
+ if 'network-config' in md and md['network-config']:
+ self._network_config = md['network-config']
found.append(name)
# There was no OVF transports found
@@ -429,7 +445,7 @@ def get_max_wait_from_cfg(cfg):
LOG.warning("Failed to get '%s', using %s",
max_wait_cfg_option, default_max_wait)
- if max_wait <= 0:
+ if max_wait < 0:
LOG.warning("Invalid value '%s' for '%s', using '%s' instead",
max_wait, max_wait_cfg_option, default_max_wait)
max_wait = default_max_wait
@@ -440,6 +456,8 @@ def get_max_wait_from_cfg(cfg):
def wait_for_imc_cfg_file(filename, maxwait=180, naplen=5,
dirpath="/var/run/vmware-imc"):
waited = 0
+ if maxwait <= naplen:
+ naplen = 1
while waited < maxwait:
fileFullPath = os.path.join(dirpath, filename)
@@ -492,13 +510,14 @@ def read_vmware_imc(config):
# This will return a dict with some content
# meta-data, user-data, some config
-def read_ovf_environment(contents):
+def read_ovf_environment(contents, read_network=False):
props = get_properties(contents)
md = {}
cfg = {}
ud = None
cfg_props = ['password']
md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
+ network_props = ['network-config']
for (prop, val) in props.items():
if prop == 'hostname':
prop = "local-hostname"
@@ -506,6 +525,12 @@ def read_ovf_environment(contents):
md[prop] = val
elif prop in cfg_props:
cfg[prop] = val
+ elif prop in network_props and read_network:
+ try:
+ network_config = base64.b64decode(val.encode())
+ md[prop] = safeload_yaml_or_dict(network_config).get('network')
+ except Exception:
+ LOG.debug("Ignore network-config in wrong format")
elif prop == "user-data":
try:
ud = base64.b64decode(val.encode())
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 619a171e..a85b71d7 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -8,11 +8,11 @@ import time
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
-
+from cloudinit.event import EventScope, EventType
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
from cloudinit.sources.helpers import openstack
from cloudinit.sources import DataSourceOracle as oracle
@@ -46,6 +46,13 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
# Whether we want to get network configuration from the metadata service.
perform_dhcp_setup = False
+ supported_update_events = {EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG
+ }}
+
def __init__(self, sys_cfg, distro, paths):
super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
self.metadata_address = None
diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py
index 0b8994bf..bb69e998 100644
--- a/cloudinit/sources/DataSourceRbxCloud.py
+++ b/cloudinit/sources/DataSourceRbxCloud.py
@@ -17,7 +17,7 @@ from cloudinit import log as logging
from cloudinit import sources
from cloudinit import subp
from cloudinit import util
-from cloudinit.event import EventType
+from cloudinit.event import EventScope, EventType
LOG = logging.getLogger(__name__)
ETC_HOSTS = '/etc/hosts'
@@ -206,10 +206,11 @@ def read_user_data_callback(mount_dir):
class DataSourceRbxCloud(sources.DataSource):
dsname = "RbxCloud"
- update_events = {'network': [
+ default_update_events = {EventScope.NETWORK: {
EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT
- ]}
+ EventType.BOOT,
+ EventType.BOOT_LEGACY
+ }}
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index 41be7665..7b8974a2 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -31,8 +31,8 @@ from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
from cloudinit import net
+from cloudinit.event import EventScope, EventType
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-from cloudinit.event import EventType
LOG = logging.getLogger(__name__)
@@ -172,7 +172,13 @@ def query_data_api(api_type, api_address, retries, timeout):
class DataSourceScaleway(sources.DataSource):
dsname = "Scaleway"
- update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]}
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index fd292baa..9b16bf8d 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -36,7 +36,7 @@ from cloudinit import serial
from cloudinit import sources
from cloudinit import subp
from cloudinit import util
-from cloudinit.event import EventType
+from cloudinit.event import EventScope, EventType
LOG = logging.getLogger(__name__)
@@ -170,6 +170,11 @@ class DataSourceSmartOS(sources.DataSource):
smartos_type = sources.UNSET
md_client = sources.UNSET
+ default_update_events = {EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY
+ }}
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -181,7 +186,6 @@ class DataSourceSmartOS(sources.DataSource):
self.metadata = {}
self.network_data = None
self._network_config = None
- self.update_events['network'].add(EventType.BOOT)
self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
diff --git a/cloudinit/sources/DataSourceVMware.py b/cloudinit/sources/DataSourceVMware.py
new file mode 100644
index 00000000..22ca63de
--- /dev/null
+++ b/cloudinit/sources/DataSourceVMware.py
@@ -0,0 +1,871 @@
+# Cloud-Init DataSource for VMware
+#
+# Copyright (c) 2018-2021 VMware, Inc. All Rights Reserved.
+#
+# Authors: Anish Swaminathan <anishs@vmware.com>
+# Andrew Kutz <akutz@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Cloud-Init DataSource for VMware
+
+This module provides a cloud-init datasource for VMware systems and supports
+multiple transport types, including:
+
+ * EnvVars
+ * GuestInfo
+
+Netifaces (https://github.com/al45tair/netifaces)
+
+ Please note this module relies on the netifaces project to introspect the
+ runtime network configuration of the host on which this datasource is
+ running. This is in contrast to the rest of cloud-init which uses the
+ cloudinit/netinfo module.
+
+ The reasons for using netifaces include:
+
+ * Netifaces is built in C and is more portable across multiple systems
+ and more deterministic than shell exec'ing local network commands and
+ parsing their output.
+
+ * Netifaces provides a stable way to determine the view of the host's
+ network after DHCP has brought the network online. Unlike most other
+ datasources, this datasource still provides support for JINJA queries
+ based on networking information even when the network is based on a
+ DHCP lease. While this does not tie this datasource directly to
+ netifaces, it does mean the ability to consistently obtain the
+ correct information is paramount.
+
+ * It is currently possible to execute this datasource on macOS
+ (which many developers use today) to print the output of the
+ get_host_info function. This function calls netifaces to obtain
+ the same runtime network configuration that the datasource would
+ persist to the local system's instance data.
+
+ However, the netinfo module fails on macOS. The result is either a
+ hung operation that requires a SIGINT to return control to the user,
+ or, if brew is used to install iproute2mac, the ip commands are used
+ but produce output the netinfo module is unable to parse.
+
+ While macOS is not a target of cloud-init, this feature is quite
+ useful when working on this datasource.
+
+ For more information about this behavior, please see the following
+ PR comment, https://bit.ly/3fG7OVh.
+
+ The authors of this datasource are not opposed to moving away from
+ netifaces, and the goal may be to eventually do just that. This proviso
+ was added to the top of this module to remind future maintainers why
+ netifaces was used in the first place, in order to either smooth the
+ transition away from netifaces or embrace it further up the cloud-init
+ stack.
+"""
+
+import collections
+import copy
+from distutils.spawn import find_executable
+import ipaddress
+import json
+import os
+import socket
+import time
+
+from cloudinit import dmi, log as logging
+from cloudinit import sources
+from cloudinit import util
+from cloudinit.subp import subp, ProcessExecutionError
+
+import netifaces
+
+
+PRODUCT_UUID_FILE_PATH = "/sys/class/dmi/id/product_uuid"
+
+LOG = logging.getLogger(__name__)
+NOVAL = "No value found"
+
+DATA_ACCESS_METHOD_ENVVAR = "envvar"
+DATA_ACCESS_METHOD_GUESTINFO = "guestinfo"
+
+VMWARE_RPCTOOL = find_executable("vmware-rpctool")
+REDACT = "redact"
+CLEANUP_GUESTINFO = "cleanup-guestinfo"
+VMX_GUESTINFO = "VMX_GUESTINFO"
+GUESTINFO_EMPTY_YAML_VAL = "---"
+
+LOCAL_IPV4 = "local-ipv4"
+LOCAL_IPV6 = "local-ipv6"
+WAIT_ON_NETWORK = "wait-on-network"
+WAIT_ON_NETWORK_IPV4 = "ipv4"
+WAIT_ON_NETWORK_IPV6 = "ipv6"
+
+
+class DataSourceVMware(sources.DataSource):
+ """
+ Setting the hostname:
+ The hostname is set by way of the metadata key "local-hostname".
+
+ Setting the instance ID:
+ The instance ID may be set by way of the metadata key "instance-id".
+ However, if this value is absent then the instance ID is read
+ from the file /sys/class/dmi/id/product_uuid.
+
+ Configuring the network:
+ The network is configured by setting the metadata key "network"
+ with a value consistent with Network Config Versions 1 or 2,
+ depending on the Linux distro's version of cloud-init:
+
+ Network Config Version 1 - http://bit.ly/cloudinit-net-conf-v1
+ Network Config Version 2 - http://bit.ly/cloudinit-net-conf-v2
+
+ For example, CentOS 7's official cloud-init package is version
+ 0.7.9 and does not support Network Config Version 2. However,
+ this datasource still supports supplying Network Config Version 2
+ data as long as the Linux distro's cloud-init package is new
+ enough to parse the data.
+
+ The metadata key "network.encoding" may be used to indicate the
+ format of the metadata key "network". Valid encodings are base64
+ and gzip+base64.
+ """
+
+ dsname = "VMware"
+
+ def __init__(self, sys_cfg, distro, paths, ud_proc=None):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
+
+ self.data_access_method = None
+ self.vmware_rpctool = VMWARE_RPCTOOL
+
+ def _get_data(self):
+ """
+ _get_data loads the metadata, userdata, and vendordata from one of
+ the following locations in the given order:
+
+ * envvars
+ * guestinfo
+
+ Please note when updating this function with support for new data
+ transports, the order should match the order in the dscheck_VMware
+ function from the file ds-identify.
+ """
+
+ # Initialize the locally scoped metadata, userdata, and vendordata
+ # variables. They are assigned below depending on the detected data
+ # access method.
+ md, ud, vd = None, None, None
+
+ # First check to see if there is data via env vars.
+ if os.environ.get(VMX_GUESTINFO, ""):
+ md = guestinfo_envvar("metadata")
+ ud = guestinfo_envvar("userdata")
+ vd = guestinfo_envvar("vendordata")
+
+ if md or ud or vd:
+ self.data_access_method = DATA_ACCESS_METHOD_ENVVAR
+
+ # At this point, all additional data transports are valid only on
+ # a VMware platform.
+ if not self.data_access_method:
+ system_type = dmi.read_dmi_data("system-product-name")
+ if system_type is None:
+ LOG.debug("No system-product-name found")
+ return False
+ if "vmware" not in system_type.lower():
+ LOG.debug("Not a VMware platform")
+ return False
+
+ # If no data was detected, check the guestinfo transport next.
+ if not self.data_access_method:
+ if self.vmware_rpctool:
+ md = guestinfo("metadata", self.vmware_rpctool)
+ ud = guestinfo("userdata", self.vmware_rpctool)
+ vd = guestinfo("vendordata", self.vmware_rpctool)
+
+ if md or ud or vd:
+ self.data_access_method = DATA_ACCESS_METHOD_GUESTINFO
+
+ if not self.data_access_method:
+ LOG.error("failed to find a valid data access method")
+ return False
+
+ LOG.info("using data access method %s", self._get_subplatform())
+
+ # Get the metadata.
+ self.metadata = process_metadata(load_json_or_yaml(md))
+
+ # Get the user data.
+ self.userdata_raw = ud
+
+ # Get the vendor data.
+ self.vendordata_raw = vd
+
+ # Redact any sensitive information.
+ self.redact_keys()
+
+ # get_data returns true if there is any available metadata,
+ # userdata, or vendordata.
+ if self.metadata or self.userdata_raw or self.vendordata_raw:
+ return True
+ else:
+ return False
+
+ def setup(self, is_new_instance):
+ """setup(is_new_instance)
+
+ This is called before user-data and vendor-data have been processed.
+
+ Unless the datasource has set mode to 'local', networking per
+ 'fallback' or per 'network_config' will have been written and
+ brought up on the OS at this point.
+ """
+
+ host_info = wait_on_network(self.metadata)
+ LOG.info("got host-info: %s", host_info)
+
+ # Reflect any possible local IPv4 or IPv6 addresses in the guest
+ # info.
+ advertise_local_ip_addrs(host_info)
+
+ # Ensure the metadata gets updated with information about the
+ # host, including the network interfaces, default IP addresses,
+ # etc.
+ self.metadata = util.mergemanydict([self.metadata, host_info])
+
+ # Persist the instance data for versions of cloud-init that support
+ # doing so. This occurs here rather than in the get_data call in
+ # order to ensure that the network interfaces are up and can be
+ # persisted with the metadata.
+ self.persist_instance_data()
+
+ def _get_subplatform(self):
+ get_key_name_fn = None
+ if self.data_access_method == DATA_ACCESS_METHOD_ENVVAR:
+ get_key_name_fn = get_guestinfo_envvar_key_name
+ elif self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO:
+ get_key_name_fn = get_guestinfo_key_name
+ else:
+ return sources.METADATA_UNKNOWN
+
+ return "%s (%s)" % (
+ self.data_access_method,
+ get_key_name_fn("metadata"),
+ )
+
+ @property
+ def network_config(self):
+ if "network" in self.metadata:
+ LOG.debug("using metadata network config")
+ else:
+ LOG.debug("using fallback network config")
+ self.metadata["network"] = {
+ "config": self.distro.generate_fallback_config(),
+ }
+ return self.metadata["network"]["config"]
+
+ def get_instance_id(self):
+ # Pull the instance ID out of the metadata if present. Otherwise
+ # read the file /sys/class/dmi/id/product_uuid for the instance ID.
+ if self.metadata and "instance-id" in self.metadata:
+ return self.metadata["instance-id"]
+ with open(PRODUCT_UUID_FILE_PATH, "r") as id_file:
+ self.metadata["instance-id"] = str(id_file.read()).rstrip().lower()
+ return self.metadata["instance-id"]
+
+ def get_public_ssh_keys(self):
+ for key_name in (
+ "public-keys-data",
+ "public_keys_data",
+ "public-keys",
+ "public_keys",
+ ):
+ if key_name in self.metadata:
+ return sources.normalize_pubkey_data(self.metadata[key_name])
+ return []
+
+ def redact_keys(self):
+ # Determine if there are any keys to redact.
+ keys_to_redact = None
+ if REDACT in self.metadata:
+ keys_to_redact = self.metadata[REDACT]
+ elif CLEANUP_GUESTINFO in self.metadata:
+ # This is for backwards compatibility.
+ keys_to_redact = self.metadata[CLEANUP_GUESTINFO]
+
+ if self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO:
+ guestinfo_redact_keys(keys_to_redact, self.vmware_rpctool)
+
+
+def decode(key, enc_type, data):
+ """
+ decode returns the decoded string value of data.
+ key is a string used to identify the data being decoded in log messages.
+ """
+ LOG.debug("Getting encoded data for key=%s, enc=%s", key, enc_type)
+
+ raw_data = None
+ if enc_type in ["gzip+base64", "gz+b64"]:
+ LOG.debug("Decoding %s format %s", enc_type, key)
+ raw_data = util.decomp_gzip(util.b64d(data))
+ elif enc_type in ["base64", "b64"]:
+ LOG.debug("Decoding %s format %s", enc_type, key)
+ raw_data = util.b64d(data)
+ else:
+ LOG.debug("Plain-text data %s", key)
+ raw_data = data
+
+ return util.decode_binary(raw_data)
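+
+# A minimal sketch: decode('guestinfo.userdata', 'base64', 'aGVsbG8=')
+# returns 'hello', while an unrecognized enc_type passes the data through
+# unchanged.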
+
+
+def get_none_if_empty_val(val):
+ """
+ get_none_if_empty_val returns None if the provided value, once stripped
+ of its trailing whitespace, is empty or equal to GUESTINFO_EMPTY_YAML_VAL.
+
+ The return value is always a string, regardless of whether the input is
+ a bytes class or a string.
+ """
+
+ # If the provided value is a bytes class, convert it to a string to
+ # simplify the rest of this function's logic.
+ val = util.decode_binary(val)
+ val = val.rstrip()
+ if len(val) == 0 or val == GUESTINFO_EMPTY_YAML_VAL:
+ return None
+ return val
+
+
+def advertise_local_ip_addrs(host_info):
+ """
+ advertise_local_ip_addrs gets the local IP address information from
+ the provided host_info map and sets the addresses in the guestinfo
+ namespace
+ """
+ if not host_info:
+ return
+
+ # Reflect any possible local IPv4 or IPv6 addresses in the guest
+ # info.
+ local_ipv4 = host_info.get(LOCAL_IPV4)
+ if local_ipv4:
+ guestinfo_set_value(LOCAL_IPV4, local_ipv4)
+ LOG.info("advertised local ipv4 address %s in guestinfo", local_ipv4)
+
+ local_ipv6 = host_info.get(LOCAL_IPV6)
+ if local_ipv6:
+ guestinfo_set_value(LOCAL_IPV6, local_ipv6)
+ LOG.info("advertised local ipv6 address %s in guestinfo", local_ipv6)
+
+
+def handle_returned_guestinfo_val(key, val):
+ """
+ handle_returned_guestinfo_val returns the provided value if it is
+ not empty or set to GUESTINFO_EMPTY_YAML_VAL, otherwise None is
+ returned
+ """
+ val = get_none_if_empty_val(val)
+ if val:
+ return val
+ LOG.debug("No value found for key %s", key)
+ return None
+
+
+def get_guestinfo_key_name(key):
+ return "guestinfo." + key
+
+
+def get_guestinfo_envvar_key_name(key):
+ return ("vmx." + get_guestinfo_key_name(key)).upper().replace(".", "_", -1)
+
+
+def guestinfo_envvar(key):
+ val = guestinfo_envvar_get_value(key)
+ if not val:
+ return None
+ enc_type = guestinfo_envvar_get_value(key + ".encoding")
+ return decode(get_guestinfo_envvar_key_name(key), enc_type, val)
+
+
+def guestinfo_envvar_get_value(key):
+ env_key = get_guestinfo_envvar_key_name(key)
+ return handle_returned_guestinfo_val(key, os.environ.get(env_key, ""))
+
+
+def guestinfo(key, vmware_rpctool=VMWARE_RPCTOOL):
+ """
+ guestinfo returns the guestinfo value for the provided key, decoding
+ the value when required
+ """
+ val = guestinfo_get_value(key, vmware_rpctool)
+ if not val:
+ return None
+ enc_type = guestinfo_get_value(key + ".encoding", vmware_rpctool)
+ return decode(get_guestinfo_key_name(key), enc_type, val)
+
+
+def guestinfo_get_value(key, vmware_rpctool=VMWARE_RPCTOOL):
+ """
+ Returns a guestinfo value for the specified key.
+ """
+ LOG.debug("Getting guestinfo value for key %s", key)
+
+ try:
+ (stdout, stderr) = subp(
+ [
+ vmware_rpctool,
+ "info-get " + get_guestinfo_key_name(key),
+ ]
+ )
+ if stderr == NOVAL:
+ LOG.debug("No value found for key %s", key)
+ elif not stdout:
+ LOG.error("Failed to get guestinfo value for key %s", key)
+ return handle_returned_guestinfo_val(key, stdout)
+ except ProcessExecutionError as error:
+ if error.stderr == NOVAL:
+ LOG.debug("No value found for key %s", key)
+ else:
+ util.logexc(
+ LOG,
+ "Failed to get guestinfo value for key %s: %s",
+ key,
+ error,
+ )
+ except Exception:
+ util.logexc(
+ LOG,
+ "Unexpected error while trying to get "
+ + "guestinfo value for key %s",
+ key,
+ )
+
+ return None
+
+
+def guestinfo_set_value(key, value, vmware_rpctool=VMWARE_RPCTOOL):
+ """
+ Sets a guestinfo value for the specified key. Set value to an empty string
+ to clear an existing guestinfo key.
+ """
+
+ # If value is an empty string then set it to a single space as it is not
+ # possible to set a guestinfo key to an empty string. Setting a guestinfo
+ # key to a single space is as close as it gets to clearing an existing
+ # guestinfo key.
+ if value == "":
+ value = " "
+
+ LOG.debug("Setting guestinfo key=%s to value=%s", key, value)
+
+ try:
+ subp(
+ [
+ vmware_rpctool,
+ ("info-set %s %s" % (get_guestinfo_key_name(key), value)),
+ ]
+ )
+ return True
+ except ProcessExecutionError as error:
+ util.logexc(
+ LOG,
+ "Failed to set guestinfo key=%s to value=%s: %s",
+ key,
+ value,
+ error,
+ )
+ except Exception:
+ util.logexc(
+ LOG,
+ "Unexpected error while trying to set "
+ + "guestinfo key=%s to value=%s",
+ key,
+ value,
+ )
+
+ return None
+
+
+def guestinfo_redact_keys(keys, vmware_rpctool=VMWARE_RPCTOOL):
+ """
+ guestinfo_redact_keys redacts the guestinfo values of all of the keys in
+ the given list. Each key will have its value set to "---". Since that
+ value is valid YAML, cloud-init can still read it if it tries.
+ """
+ if not keys:
+ return
+ if not isinstance(keys, (list, tuple)):
+ keys = [keys]
+ for key in keys:
+ key_name = get_guestinfo_key_name(key)
+ LOG.info("clearing %s", key_name)
+ if not guestinfo_set_value(
+ key, GUESTINFO_EMPTY_YAML_VAL, vmware_rpctool
+ ):
+ LOG.error("failed to clear %s", key_name)
+ LOG.info("clearing %s.encoding", key_name)
+ if not guestinfo_set_value(key + ".encoding", "", vmware_rpctool):
+ LOG.error("failed to clear %s.encoding", key_name)
+
+
+def load_json_or_yaml(data):
+ """
+ load_json_or_yaml first attempts to unmarshal the provided data as
+ JSON, and if that fails then attempts to unmarshal the data as YAML.
+ If data is falsy then an empty dictionary is returned.
+ """
+ if not data:
+ return {}
+ try:
+ return util.load_json(data)
+ except (json.JSONDecodeError, TypeError):
+ return util.load_yaml(data)
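+
+# For example, load_json_or_yaml('{"a": 1}') and load_json_or_yaml('a: 1')
+# both return {'a': 1}, while falsy input returns {}.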
+
+
+def process_metadata(data):
+ """
+ process_metadata processes metadata and loads the optional network
+ configuration.
+ """
+ network = None
+ if "network" in data:
+ network = data["network"]
+ del data["network"]
+
+ network_enc = None
+ if "network.encoding" in data:
+ network_enc = data["network.encoding"]
+ del data["network.encoding"]
+
+ if network:
+ if isinstance(network, collections.abc.Mapping):
+ LOG.debug("network data copied to 'config' key")
+ network = {"config": copy.deepcopy(network)}
+ else:
+ LOG.debug("network data to be decoded %s", network)
+ dec_net = decode("metadata.network", network_enc, network)
+ network = {
+ "config": load_json_or_yaml(dec_net),
+ }
+
+ LOG.debug("network data %s", network)
+ data["network"] = network
+
+ return data
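+
+# A minimal sketch: metadata of the form
+#   {'network': '<base64 YAML>', 'network.encoding': 'base64'}
+# is returned with the decoded value nested as
+#   {'network': {'config': {...}}}.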
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceVMware, (sources.DEP_FILESYSTEM,)), # Run at init-local
+ (DataSourceVMware, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+def get_datasource_list(depends):
+ """
+ Return a list of data sources that match this set of dependencies
+ """
+ return sources.list_from_depends(depends, datasources)
+
+
+def get_default_ip_addrs():
+ """
+ Returns the default IPv4 and IPv6 addresses based on the device(s) used for
+ the default route. Please note that None may be returned for either address
+ family if that family has no default route or if there are multiple
+ addresses associated with the device used by the default route for a
+ given address family.
+ """
+ # TODO(promote and use netifaces in cloudinit.net* modules)
+ gateways = netifaces.gateways()
+ if "default" not in gateways:
+ return None, None
+
+ default_gw = gateways["default"]
+ if (
+ netifaces.AF_INET not in default_gw
+ and netifaces.AF_INET6 not in default_gw
+ ):
+ return None, None
+
+ ipv4 = None
+ ipv6 = None
+
+ gw4 = default_gw.get(netifaces.AF_INET)
+ if gw4:
+ _, dev4 = gw4
+ addr4_fams = netifaces.ifaddresses(dev4)
+ if addr4_fams:
+ af_inet4 = addr4_fams.get(netifaces.AF_INET)
+ if af_inet4:
+ if len(af_inet4) > 1:
+ LOG.warning(
+ "device %s has more than one ipv4 address: %s",
+ dev4,
+ af_inet4,
+ )
+ elif "addr" in af_inet4[0]:
+ ipv4 = af_inet4[0]["addr"]
+
+ # Try to get the default IPv6 address by first seeing if there is a default
+ # IPv6 route.
+ gw6 = default_gw.get(netifaces.AF_INET6)
+ if gw6:
+ _, dev6 = gw6
+ addr6_fams = netifaces.ifaddresses(dev6)
+ if addr6_fams:
+ af_inet6 = addr6_fams.get(netifaces.AF_INET6)
+ if af_inet6:
+ if len(af_inet6) > 1:
+ LOG.warning(
+ "device %s has more than one ipv6 address: %s",
+ dev6,
+ af_inet6,
+ )
+ elif "addr" in af_inet6[0]:
+ ipv6 = af_inet6[0]["addr"]
+
+ # If there is a default IPv4 address but not IPv6, then see if there is a
+ # single IPv6 address associated with the same device associated with the
+ # default IPv4 address.
+ if ipv4 and not ipv6:
+ af_inet6 = addr4_fams.get(netifaces.AF_INET6)
+ if af_inet6:
+ if len(af_inet6) > 1:
+ LOG.warning(
+ "device %s has more than one ipv6 address: %s",
+ dev4,
+ af_inet6,
+ )
+ elif "addr" in af_inet6[0]:
+ ipv6 = af_inet6[0]["addr"]
+
+ # If there is a default IPv6 address but not IPv4, then see if there is a
+ # single IPv4 address associated with the same device associated with the
+ # default IPv6 address.
+ if not ipv4 and ipv6:
+ af_inet4 = addr6_fams.get(netifaces.AF_INET)
+ if af_inet4:
+ if len(af_inet4) > 1:
+ LOG.warning(
+ "device %s has more than one ipv4 address: %s",
+ dev6,
+ af_inet4,
+ )
+ elif "addr" in af_inet4[0]:
+ ipv4 = af_inet4[0]["addr"]
+
+ return ipv4, ipv6
+
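The netifaces structures walked above are worth seeing once. On a typical single-NIC host they look roughly like this (values are illustrative and vary by host):

    import netifaces

    gws = netifaces.gateways()
    # e.g. {'default': {netifaces.AF_INET: ('192.168.1.1', 'eth0')}, ...}
    if "default" in gws and netifaces.AF_INET in gws["default"]:
        _gw_ip, dev = gws["default"][netifaces.AF_INET]
        addrs = netifaces.ifaddresses(dev).get(netifaces.AF_INET, [])
        # e.g. [{'addr': '192.168.1.10', 'netmask': '255.255.255.0'}]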
+
+# patched socket.getfqdn() - see https://bugs.python.org/issue5004
+
+
+def getfqdn(name=""):
+ """Get fully qualified domain name from name.
+ An empty argument is interpreted as meaning the local host.
+ """
+ # TODO(may want to promote this function to util.getfqdn)
+ # TODO(may want to extend util.get_hostname to accept fqdn=True param)
+ name = name.strip()
+ if not name or name == "0.0.0.0":
+ name = util.get_hostname()
+ try:
+ addrs = socket.getaddrinfo(
+ name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME
+ )
+ except socket.error:
+ pass
+ else:
+ for addr in addrs:
+ if addr[3]:
+ name = addr[3]
+ break
+ return name
+
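The getaddrinfo call hinges on the AI_CANONNAME flag: each result tuple is (family, type, proto, canonname, sockaddr), and the flag asks the resolver to fill in the fourth field. A standalone sketch:

    import socket

    infos = socket.getaddrinfo(
        "localhost", None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME
    )
    # Take the first non-empty canonical name, as the function above does.
    canonical = next((info[3] for info in infos if info[3]), "localhost")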
+
+def is_valid_ip_addr(val):
+ """
+    Returns False if the address is loopback, link-local or unspecified,
+    and True otherwise. Returns None if the value cannot be parsed as an
+    IP address at all.
+ """
+ # TODO(extend cloudinit.net.is_ip_addr exclude link_local/loopback etc)
+    # TODO(migrate to use cloudinit.net.is_ip_addr)
+
+ addr = None
+ try:
+ addr = ipaddress.ip_address(val)
+ except ipaddress.AddressValueError:
+ addr = ipaddress.ip_address(str(val))
+ except Exception:
+ return None
+
+ if addr.is_link_local or addr.is_loopback or addr.is_unspecified:
+ return False
+ return True
+
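The stdlib ipaddress properties do the heavy lifting here; for example:

    import ipaddress

    for value in ("10.0.0.5", "127.0.0.1", "fe80::1", "0.0.0.0"):
        addr = ipaddress.ip_address(value)
        usable = not (
            addr.is_link_local or addr.is_loopback or addr.is_unspecified
        )
        print(value, usable)  # only 10.0.0.5 prints True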
+
+def get_host_info():
+ """
+ Returns host information such as the host name and network interfaces.
+ """
+    # TODO(look to promote netifaces use up in cloud-init netinfo funcs)
+ host_info = {
+ "network": {
+ "interfaces": {
+ "by-mac": collections.OrderedDict(),
+ "by-ipv4": collections.OrderedDict(),
+ "by-ipv6": collections.OrderedDict(),
+ },
+ },
+ }
+ hostname = getfqdn(util.get_hostname())
+ if hostname:
+ host_info["hostname"] = hostname
+ host_info["local-hostname"] = hostname
+ host_info["local_hostname"] = hostname
+
+ default_ipv4, default_ipv6 = get_default_ip_addrs()
+ if default_ipv4:
+ host_info[LOCAL_IPV4] = default_ipv4
+ if default_ipv6:
+ host_info[LOCAL_IPV6] = default_ipv6
+
+ by_mac = host_info["network"]["interfaces"]["by-mac"]
+ by_ipv4 = host_info["network"]["interfaces"]["by-ipv4"]
+ by_ipv6 = host_info["network"]["interfaces"]["by-ipv6"]
+
+ ifaces = netifaces.interfaces()
+ for dev_name in ifaces:
+ addr_fams = netifaces.ifaddresses(dev_name)
+ af_link = addr_fams.get(netifaces.AF_LINK)
+ af_inet4 = addr_fams.get(netifaces.AF_INET)
+ af_inet6 = addr_fams.get(netifaces.AF_INET6)
+
+ mac = None
+ if af_link and "addr" in af_link[0]:
+ mac = af_link[0]["addr"]
+
+ # Do not bother recording localhost
+ if mac == "00:00:00:00:00:00":
+ continue
+
+ if mac and (af_inet4 or af_inet6):
+ key = mac
+ val = {}
+ if af_inet4:
+ af_inet4_vals = []
+ for ip_info in af_inet4:
+ if not is_valid_ip_addr(ip_info["addr"]):
+ continue
+ af_inet4_vals.append(ip_info)
+ val["ipv4"] = af_inet4_vals
+ if af_inet6:
+ af_inet6_vals = []
+ for ip_info in af_inet6:
+ if not is_valid_ip_addr(ip_info["addr"]):
+ continue
+ af_inet6_vals.append(ip_info)
+ val["ipv6"] = af_inet6_vals
+ by_mac[key] = val
+
+ if af_inet4:
+ for ip_info in af_inet4:
+ key = ip_info["addr"]
+ if not is_valid_ip_addr(key):
+ continue
+ val = copy.deepcopy(ip_info)
+ del val["addr"]
+ if mac:
+ val["mac"] = mac
+ by_ipv4[key] = val
+
+ if af_inet6:
+ for ip_info in af_inet6:
+ key = ip_info["addr"]
+ if not is_valid_ip_addr(key):
+ continue
+ val = copy.deepcopy(ip_info)
+ del val["addr"]
+ if mac:
+ val["mac"] = mac
+ by_ipv6[key] = val
+
+ return host_info
+
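Put together, the returned structure looks roughly like this (illustrative values; LOCAL_IPV4/LOCAL_IPV6 are assumed to be the "local-ipv4"/"local-ipv6" key constants defined earlier in this file):

    host_info = {
        "hostname": "vm1.example.com",
        "local-hostname": "vm1.example.com",
        "local_hostname": "vm1.example.com",
        "local-ipv4": "192.168.1.10",
        "network": {
            "interfaces": {
                "by-mac": {
                    "52:54:00:aa:bb:cc": {"ipv4": [...], "ipv6": []},
                },
                "by-ipv4": {
                    "192.168.1.10": {
                        "netmask": "255.255.255.0",
                        "mac": "52:54:00:aa:bb:cc",
                    },
                },
                "by-ipv6": {},
            },
        },
    }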
+
+def wait_on_network(metadata):
+ # Determine whether we need to wait on the network coming online.
+ wait_on_ipv4 = False
+ wait_on_ipv6 = False
+ if WAIT_ON_NETWORK in metadata:
+ wait_on_network = metadata[WAIT_ON_NETWORK]
+ if WAIT_ON_NETWORK_IPV4 in wait_on_network:
+ wait_on_ipv4_val = wait_on_network[WAIT_ON_NETWORK_IPV4]
+ if isinstance(wait_on_ipv4_val, bool):
+ wait_on_ipv4 = wait_on_ipv4_val
+ else:
+ wait_on_ipv4 = util.translate_bool(wait_on_ipv4_val)
+ if WAIT_ON_NETWORK_IPV6 in wait_on_network:
+ wait_on_ipv6_val = wait_on_network[WAIT_ON_NETWORK_IPV6]
+ if isinstance(wait_on_ipv6_val, bool):
+ wait_on_ipv6 = wait_on_ipv6_val
+ else:
+ wait_on_ipv6 = util.translate_bool(wait_on_ipv6_val)
+
+ # Get information about the host.
+ host_info = None
+ while host_info is None:
+        # TODO: this polling loop logs twice per second while waiting for
+        # ipv4/ipv6; consider logging once on entry and once on successful
+        # exit instead.
+ host_info = get_host_info()
+
+ network = host_info.get("network") or {}
+ interfaces = network.get("interfaces") or {}
+ by_ipv4 = interfaces.get("by-ipv4") or {}
+ by_ipv6 = interfaces.get("by-ipv6") or {}
+
+        # Default both readiness flags so the debug log below cannot hit an
+        # unbound local when only one address family is being waited on.
+        ipv4_ready = False
+        ipv6_ready = False
+
+        if wait_on_ipv4:
+            ipv4_ready = len(by_ipv4) > 0
+            if not ipv4_ready:
+                host_info = None
+
+        if wait_on_ipv6:
+            ipv6_ready = len(by_ipv6) > 0
+            if not ipv6_ready:
+                host_info = None
+
+ if host_info is None:
+ LOG.debug(
+ "waiting on network: wait4=%s, ready4=%s, wait6=%s, ready6=%s",
+ wait_on_ipv4,
+ ipv4_ready,
+ wait_on_ipv6,
+ ipv6_ready,
+ )
+ time.sleep(1)
+
+ LOG.debug("waiting on network complete")
+ return host_info
+
+
+def main():
+ """
+ Executed when this file is used as a program.
+ """
+ try:
+ logging.setupBasicLogging()
+ except Exception:
+ pass
+ metadata = {
+ "wait-on-network": {"ipv4": True, "ipv6": "false"},
+ "network": {"config": {"dhcp": True}},
+ }
+ host_info = wait_on_network(metadata)
+ metadata = util.mergemanydict([metadata, host_info])
+ print(util.json_dumps(metadata))
+
+
+if __name__ == "__main__":
+ main()
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 7d74f8d9..cc7e1c3c 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -13,6 +13,7 @@ import copy
import json
import os
from collections import namedtuple
+from typing import Dict, List
from cloudinit import dmi
from cloudinit import importer
@@ -22,7 +23,8 @@ from cloudinit import type_utils
from cloudinit import user_data as ud
from cloudinit import util
from cloudinit.atomic_helper import write_json
-from cloudinit.event import EventType
+from cloudinit.distros import Distro
+from cloudinit.event import EventScope, EventType
from cloudinit.filters import launch_index
from cloudinit.persistence import CloudInitPickleMixin
from cloudinit.reporting import events
@@ -74,6 +76,10 @@ NetworkConfigSource = namedtuple('NetworkConfigSource',
_NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES)
+class DatasourceUnpickleUserDataError(Exception):
+ """Raised when userdata is unable to be unpickled due to python upgrades"""
+
+
class DataSourceNotFoundException(Exception):
pass
@@ -175,12 +181,23 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
# The datasource defines a set of supported EventTypes during which
# the datasource can react to changes in metadata and regenerate
- # network configuration on metadata changes.
- # A datasource which supports writing network config on each system boot
- # would call update_events['network'].add(EventType.BOOT).
+ # network configuration on metadata changes. These are defined in
+ # `supported_network_events`.
+ # The datasource also defines a set of default EventTypes that the
+ # datasource can react to. These are the event types that will be used
+ # if not overridden by the user.
+ # A datasource requiring to write network config on each system boot
+ # would call default_update_events['network'].add(EventType.BOOT).
# Default: generate network config on new instance id (first boot).
- update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])}
+ supported_update_events = {EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ }}
+ default_update_events = {EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ }}
# N-tuple listing default values for any metadata-related class
# attributes cached on an instance by a process_data runs. These attribute
@@ -199,7 +216,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
_ci_pkl_version = 1
- def __init__(self, sys_cfg, distro, paths, ud_proc=None):
+ def __init__(self, sys_cfg, distro: Distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
self.distro = distro
self.paths = paths
@@ -227,6 +244,20 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
self.vendordata2 = None
if not hasattr(self, 'vendordata2_raw'):
self.vendordata2_raw = None
+ if hasattr(self, 'userdata') and self.userdata is not None:
+ # If userdata stores MIME data, on < python3.6 it will be
+ # missing the 'policy' attribute that exists on >=python3.6.
+ # Calling str() on the userdata will attempt to access this
+ # policy attribute. This will raise an exception, causing
+ # the pickle load to fail, so cloud-init will discard the cache
+ try:
+ str(self.userdata)
+ except AttributeError as e:
+ LOG.debug(
+ "Unable to unpickle datasource: %s."
+ " Ignoring current cache.", e
+ )
+ raise DatasourceUnpickleUserDataError() from e
def __str__(self):
return type_utils.obj_name(self)
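The user-facing side of these scopes and event types is the `updates` key in cloud-config userdata; per the new tests in cloudinit/tests/test_event.py further down this patch, the mapping works like this:

    from cloudinit.event import EventScope, EventType, userdata_to_events

    # "#cloud-config\nupdates:\n  network:\n    when: [boot]" parses to:
    userdata = {"network": {"when": ["boot"]}}
    assert userdata_to_events(userdata) == {
        EventScope.NETWORK: {EventType.BOOT}
    }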
@@ -648,10 +679,22 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
def get_package_mirror_info(self):
return self.distro.get_package_mirror_info(data_source=self)
- def update_metadata(self, source_event_types):
+ def get_supported_events(self, source_event_types: List[EventType]):
+ supported_events = {} # type: Dict[EventScope, set]
+ for event in source_event_types:
+ for update_scope, update_events in self.supported_update_events.items(): # noqa: E501
+ if event in update_events:
+ if not supported_events.get(update_scope):
+ supported_events[update_scope] = set()
+ supported_events[update_scope].add(event)
+ return supported_events
+
+ def update_metadata_if_supported(
+ self, source_event_types: List[EventType]
+ ) -> bool:
"""Refresh cached metadata if the datasource supports this event.
- The datasource has a list of update_events which
+ The datasource has a list of supported_update_events which
trigger refreshing all cached metadata as well as refreshing the
network configuration.
@@ -661,17 +704,12 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
@return True if the datasource did successfully update cached metadata
due to source_event_type.
"""
- supported_events = {}
- for event in source_event_types:
- for update_scope, update_events in self.update_events.items():
- if event in update_events:
- if not supported_events.get(update_scope):
- supported_events[update_scope] = set()
- supported_events[update_scope].add(event)
+ supported_events = self.get_supported_events(source_event_types)
for scope, matched_events in supported_events.items():
LOG.debug(
"Update datasource metadata and %s config due to events: %s",
- scope, ', '.join(matched_events))
+ scope.value,
+ ', '.join([event.value for event in matched_events]))
# Each datasource has a cached config property which needs clearing
# Once cleared that config property will be regenerated from
# current metadata.
@@ -682,7 +720,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
if result:
return True
LOG.debug("Datasource %s not updated for events: %s", self,
- ', '.join(source_event_types))
+ ', '.join([event.value for event in source_event_types]))
return False
def check_instance_id(self, sys_cfg):
@@ -789,7 +827,9 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
with myrep:
LOG.debug("Seeing if we can get any data from %s", cls)
s = cls(sys_cfg, distro, paths)
- if s.update_metadata([EventType.BOOT_NEW_INSTANCE]):
+ if s.update_metadata_if_supported(
+ [EventType.BOOT_NEW_INSTANCE]
+ ):
myrep.message = "found %s data from %s" % (mode, name)
return (s, type_utils.obj_name(cls))
except Exception:
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index ad476076..a5ac1d57 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -344,6 +344,40 @@ def http_with_retries(url, **kwargs) -> str:
raise exc
+def build_minimal_ovf(
+ username: str,
+ hostname: str,
+ disableSshPwd: str) -> bytes:
+ OVF_ENV_TEMPLATE = textwrap.dedent('''\
+ <ns0:Environment xmlns:ns0="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:ns1="http://schemas.microsoft.com/windowsazure"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <ns1:ProvisioningSection>
+ <ns1:Version>1.0</ns1:Version>
+ <ns1:LinuxProvisioningConfigurationSet>
+ <ns1:ConfigurationSetType>LinuxProvisioningConfiguration
+ </ns1:ConfigurationSetType>
+ <ns1:UserName>{username}</ns1:UserName>
+ <ns1:DisableSshPasswordAuthentication>{disableSshPwd}
+ </ns1:DisableSshPasswordAuthentication>
+ <ns1:HostName>{hostname}</ns1:HostName>
+ </ns1:LinuxProvisioningConfigurationSet>
+ </ns1:ProvisioningSection>
+ <ns1:PlatformSettingsSection>
+ <ns1:Version>1.0</ns1:Version>
+ <ns1:PlatformSettings>
+ <ns1:ProvisionGuestAgent>true</ns1:ProvisionGuestAgent>
+ </ns1:PlatformSettings>
+ </ns1:PlatformSettingsSection>
+ </ns0:Environment>
+ ''')
+ ret = OVF_ENV_TEMPLATE.format(
+ username=username,
+ hostname=hostname,
+ disableSshPwd=disableSshPwd)
+ return ret.encode('utf-8')
+
+
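A usage sketch (hypothetical argument values; the parameters mirror the templated OVF fields):

    ovf = build_minimal_ovf(
        username="azureuser",
        hostname="vm-01",
        disableSshPwd="true",
    )
    assert ovf.startswith(b"<ns0:Environment")
    assert b"<ns1:UserName>azureuser</ns1:UserName>" in ovf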
class AzureEndpointHttpClient:
headers = {
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index 1420a988..a2b052a6 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -5,7 +5,7 @@ import inspect
import os
import stat
-from cloudinit.event import EventType
+from cloudinit.event import EventScope, EventType
from cloudinit.helpers import Paths
from cloudinit import importer
from cloudinit.sources import (
@@ -618,24 +618,29 @@ class TestDataSource(CiTestCase):
self.assertEqual('himom', getattr(self.datasource, cached_attr_name))
self.assertEqual('updated', self.datasource.myattr)
+ @mock.patch.dict(DataSource.default_update_events, {
+ EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}})
+ @mock.patch.dict(DataSource.supported_update_events, {
+ EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}})
def test_update_metadata_only_acts_on_supported_update_events(self):
- """update_metadata won't get_data on unsupported update events."""
- self.datasource.update_events['network'].discard(EventType.BOOT)
+ """update_metadata_if_supported wont get_data on unsupported events."""
self.assertEqual(
- {'network': set([EventType.BOOT_NEW_INSTANCE])},
- self.datasource.update_events)
+ {EventScope.NETWORK: set([EventType.BOOT_NEW_INSTANCE])},
+ self.datasource.default_update_events
+ )
def fake_get_data():
raise Exception('get_data should not be called')
self.datasource.get_data = fake_get_data
self.assertFalse(
- self.datasource.update_metadata(
+ self.datasource.update_metadata_if_supported(
source_event_types=[EventType.BOOT]))
+ @mock.patch.dict(DataSource.supported_update_events, {
+ EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}})
def test_update_metadata_returns_true_on_supported_update_event(self):
- """update_metadata returns get_data response on supported events."""
-
+ """update_metadata_if_supported returns get_data on supported events"""
def fake_get_data():
return True
@@ -643,14 +648,16 @@ class TestDataSource(CiTestCase):
self.datasource._network_config = 'something'
self.datasource._dirty_cache = True
self.assertTrue(
- self.datasource.update_metadata(
+ self.datasource.update_metadata_if_supported(
source_event_types=[
EventType.BOOT, EventType.BOOT_NEW_INSTANCE]))
self.assertEqual(UNSET, self.datasource._network_config)
+
self.assertIn(
"DEBUG: Update datasource metadata and network config due to"
- " events: New instance first boot",
- self.logs.getvalue())
+ " events: boot-new-instance",
+ self.logs.getvalue()
+ )
class TestRedactSensitiveData(CiTestCase):
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index c08042d6..9ccadf09 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -249,16 +249,151 @@ def render_authorizedkeysfile_paths(value, homedir, username):
return rendered
+# Inspired by safe_path() in the openssh source code (misc.c).
+def check_permissions(username, current_path, full_path, is_file, strictmodes):
+ """Check if the file/folder in @current_path has the right permissions.
+
+ We need to check that:
+ 1. If StrictMode is enabled, the owner is either root or the user
+ 2. the user can access the file/folder, otherwise ssh won't use it
+ 3. If StrictMode is enabled, no write permission is given to group
+ and world users (022)
+ """
+
+ # group/world can only execute the folder (access)
+ minimal_permissions = 0o711
+ if is_file:
+ # group/world can only read the file
+ minimal_permissions = 0o644
+
+ # 1. owner must be either root or the user itself
+ owner = util.get_owner(current_path)
+ if strictmodes and owner != username and owner != "root":
+ LOG.debug("Path %s in %s must be own by user %s or"
+ " by root, but instead is own by %s. Ignoring key.",
+ current_path, full_path, username, owner)
+ return False
+
+ parent_permission = util.get_permissions(current_path)
+ # 2. the user can access the file/folder, otherwise ssh won't use it
+ if owner == username:
+ # need only the owner permissions
+ minimal_permissions &= 0o700
+ else:
+ group_owner = util.get_group(current_path)
+ user_groups = util.get_user_groups(username)
+
+ if group_owner in user_groups:
+ # need only the group permissions
+ minimal_permissions &= 0o070
+ else:
+ # need only the world permissions
+ minimal_permissions &= 0o007
+
+ if parent_permission & minimal_permissions == 0:
+ LOG.debug("Path %s in %s must be accessible by user %s,"
+ " check its permissions",
+ current_path, full_path, username)
+ return False
+
+ # 3. no write permission (w) is given to group and world users (022)
+ # Group and world user can still have +rx.
+ if strictmodes and parent_permission & 0o022 != 0:
+ LOG.debug("Path %s in %s must not give write"
+ "permission to group or world users. Ignoring key.",
+ current_path, full_path)
+ return False
+
+ return True
+
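The bit masking deserves a worked example. For a directory owned by someone else where the caller is only a member of the owning group, the check reduces to a single group-execute bit:

    minimal_permissions = 0o711   # directory case
    minimal_permissions &= 0o070  # keep only the group bits
    assert minimal_permissions == 0o010

    assert 0o750 & minimal_permissions != 0  # drwxr-x--- is accessible
    assert 0o700 & minimal_permissions == 0  # drwx------ is not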
+
+def check_create_path(username, filename, strictmodes):
+ user_pwent = users_ssh_info(username)[1]
+ root_pwent = users_ssh_info("root")[1]
+ try:
+ # check the directories first
+ directories = filename.split("/")[1:-1]
+
+ # scan in order, from root to file name
+ parent_folder = ""
+        # use the parent of the home directory; this also copes with
+        # unit tests and unusual home directory layouts
+ home_folder = os.path.dirname(user_pwent.pw_dir)
+ for directory in directories:
+ parent_folder += "/" + directory
+
+ # security check, disallow symlinks in the AuthorizedKeysFile path.
+ if os.path.islink(parent_folder):
+ LOG.debug(
+ "Invalid directory. Symlink exists in path: %s",
+ parent_folder)
+ return False
+
+ if os.path.isfile(parent_folder):
+ LOG.debug(
+ "Invalid directory. File exists in path: %s",
+ parent_folder)
+ return False
+
+ if (home_folder.startswith(parent_folder) or
+ parent_folder == user_pwent.pw_dir):
+ continue
+
+ if not os.path.exists(parent_folder):
+                # directory does not exist, and permissions so far are good:
+ # create the directory, and make it accessible by everyone
+ # but owned by root, as it might be used by many users.
+ with util.SeLinuxGuard(parent_folder):
+ mode = 0o755
+ uid = root_pwent.pw_uid
+ gid = root_pwent.pw_gid
+ if parent_folder.startswith(user_pwent.pw_dir):
+ mode = 0o700
+ uid = user_pwent.pw_uid
+ gid = user_pwent.pw_gid
+ os.makedirs(parent_folder, mode=mode, exist_ok=True)
+ util.chownbyid(parent_folder, uid, gid)
+
+ permissions = check_permissions(username, parent_folder,
+ filename, False, strictmodes)
+ if not permissions:
+ return False
+
+ if os.path.islink(filename) or os.path.isdir(filename):
+ LOG.debug("%s is not a file!", filename)
+ return False
+
+ # check the file
+ if not os.path.exists(filename):
+ # if file does not exist: we need to create it, since the
+            # folders at this point exist and have the right permissions
+ util.write_file(filename, '', mode=0o600, ensure_dir_exists=True)
+ util.chownbyid(filename, user_pwent.pw_uid, user_pwent.pw_gid)
+
+ permissions = check_permissions(username, filename,
+ filename, True, strictmodes)
+ if not permissions:
+ return False
+ except (IOError, OSError) as e:
+ util.logexc(LOG, str(e))
+ return False
+
+ return True
+
+
def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG):
(ssh_dir, pw_ent) = users_ssh_info(username)
default_authorizedkeys_file = os.path.join(ssh_dir, 'authorized_keys')
+ user_authorizedkeys_file = default_authorizedkeys_file
auth_key_fns = []
with util.SeLinuxGuard(ssh_dir, recursive=True):
try:
ssh_cfg = parse_ssh_config_map(sshd_cfg_file)
+ key_paths = ssh_cfg.get("authorizedkeysfile",
+ "%h/.ssh/authorized_keys")
+ strictmodes = ssh_cfg.get("strictmodes", "yes")
auth_key_fns = render_authorizedkeysfile_paths(
- ssh_cfg.get("authorizedkeysfile", "%h/.ssh/authorized_keys"),
- pw_ent.pw_dir, username)
+ key_paths, pw_ent.pw_dir, username)
except (IOError, OSError):
# Give up and use a default key filename
@@ -267,17 +402,31 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG):
"config from %r, using 'AuthorizedKeysFile' file "
"%r instead", DEF_SSHD_CFG, auth_key_fns[0])
- # always store all the keys in the user's private file
- return (default_authorizedkeys_file, parse_authorized_keys(auth_key_fns))
+    # check whether one of the key files belongs to the user and has the
+    # right permissions
+ for key_path, auth_key_fn in zip(key_paths.split(), auth_key_fns):
+ if any([
+ '%u' in key_path,
+ '%h' in key_path,
+ auth_key_fn.startswith('{}/'.format(pw_ent.pw_dir))
+ ]):
+ permissions_ok = check_create_path(username, auth_key_fn,
+ strictmodes == "yes")
+ if permissions_ok:
+ user_authorizedkeys_file = auth_key_fn
+ break
+ if user_authorizedkeys_file != default_authorizedkeys_file:
+ LOG.debug(
+ "AuthorizedKeysFile has an user-specific authorized_keys, "
+ "using %s", user_authorizedkeys_file)
-def setup_user_keys(keys, username, options=None):
- # Make sure the users .ssh dir is setup accordingly
- (ssh_dir, pwent) = users_ssh_info(username)
- if not os.path.isdir(ssh_dir):
- util.ensure_dir(ssh_dir, mode=0o700)
- util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
+ return (
+ user_authorizedkeys_file,
+ parse_authorized_keys([user_authorizedkeys_file])
+ )
+
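For context, the parsing above is driven by sshd_config entries such as the following (per sshd_config(5), %h expands to the user's home directory and %u to the user name; the second path is illustrative):

    AuthorizedKeysFile %h/.ssh/authorized_keys /etc/ssh/authorized_keys/%u
    StrictModes yes

Each candidate path that is user-specific (contains %u or %h, or lives under the user's home) is permission-checked via check_create_path before being adopted.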
+def setup_user_keys(keys, username, options=None):
# Turn the 'update' keys given into actual entries
parser = AuthKeyLineParser()
key_entries = []
@@ -286,11 +435,10 @@ def setup_user_keys(keys, username, options=None):
# Extract the old and make the new
(auth_key_fn, auth_key_entries) = extract_authorized_keys(username)
+ ssh_dir = os.path.dirname(auth_key_fn)
with util.SeLinuxGuard(ssh_dir, recursive=True):
content = update_authorized_keys(auth_key_entries, key_entries)
- util.ensure_dir(os.path.dirname(auth_key_fn), mode=0o700)
- util.write_file(auth_key_fn, content, mode=0o600)
- util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid)
+ util.write_file(auth_key_fn, content, preserve_mode=True)
class SshdConfigLine(object):
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 5bacc85d..bc164fa0 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -8,9 +8,11 @@ import copy
import os
import pickle
import sys
+from collections import namedtuple
+from typing import Dict, Set
from cloudinit.settings import (
- FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, RUN_CLOUD_CONFIG)
+ FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, PER_ONCE, RUN_CLOUD_CONFIG)
from cloudinit import handlers
@@ -21,7 +23,11 @@ from cloudinit.handlers.jinja_template import JinjaTemplatePartHandler
from cloudinit.handlers.shell_script import ShellScriptPartHandler
from cloudinit.handlers.upstart_job import UpstartJobPartHandler
-from cloudinit.event import EventType
+from cloudinit.event import (
+ EventScope,
+ EventType,
+ userdata_to_events,
+)
from cloudinit.sources import NetworkConfigSource
from cloudinit import cloud
@@ -118,6 +124,7 @@ class Init(object):
def _initial_subdirs(self):
c_dir = self.paths.cloud_dir
+ run_dir = self.paths.run_dir
initial_dirs = [
c_dir,
os.path.join(c_dir, 'scripts'),
@@ -130,6 +137,7 @@ class Init(object):
os.path.join(c_dir, 'handlers'),
os.path.join(c_dir, 'sem'),
os.path.join(c_dir, 'data'),
+ os.path.join(run_dir, 'sem'),
]
return initial_dirs
@@ -148,7 +156,7 @@ class Init(object):
util.ensure_dirs(self._initial_subdirs())
log_file = util.get_cfg_option_str(self.cfg, 'def_log_file')
if log_file:
- util.ensure_file(log_file, preserve_mode=True)
+ util.ensure_file(log_file, mode=0o640, preserve_mode=True)
perms = self.cfg.get('syslog_fix_perms')
if not perms:
perms = {}
@@ -233,7 +241,7 @@ class Init(object):
else:
return (None, "cache invalid in datasource: %s" % ds)
- def _get_data_source(self, existing):
+ def _get_data_source(self, existing) -> sources.DataSource:
if self.datasource is not NULL_DATA_SOURCE:
return self.datasource
@@ -259,7 +267,7 @@ class Init(object):
cfg_list,
pkg_list, self.reporter)
LOG.info("Loaded datasource %s - %s", dsname, ds)
- self.datasource = ds
+ self.datasource = ds # type: sources.DataSource
# Ensure we adjust our path members datasource
# now that we have one (thus allowing ipath to be used)
self._reset()
@@ -341,6 +349,11 @@ class Init(object):
return self._previous_iid
def is_new_instance(self):
+ """Return true if this is a new instance.
+
+ If datasource has already been initialized, this will return False,
+ even on first boot.
+ """
previous = self.previous_iid()
ret = (previous == NO_PREVIOUS_INSTANCE_ID or
previous != self.datasource.get_instance_id())
@@ -702,6 +715,46 @@ class Init(object):
return (self.distro.generate_fallback_config(),
NetworkConfigSource.fallback)
+ def update_event_enabled(
+ self, event_source_type: EventType, scope: EventScope = None
+ ) -> bool:
+ """Determine if a particular EventType is enabled.
+
+ For the `event_source_type` passed in, check whether this EventType
+ is enabled in the `updates` section of the userdata. If `updates`
+ is not enabled in userdata, check if defined as one of the
+ `default_events` on the datasource. `scope` may be used to
+ narrow the check to a particular `EventScope`.
+
+ Note that on first boot, userdata may NOT be available yet. In this
+ case, we only have the data source's `default_update_events`,
+ so an event that should be enabled in userdata may be denied.
+ """
+ default_events = self.datasource.default_update_events # type: Dict[EventScope, Set[EventType]] # noqa: E501
+ user_events = userdata_to_events(self.cfg.get('updates', {})) # type: Dict[EventScope, Set[EventType]] # noqa: E501
+ # A value in the first will override a value in the second
+ allowed = util.mergemanydict([
+ copy.deepcopy(user_events),
+ copy.deepcopy(default_events),
+ ])
+ LOG.debug('Allowed events: %s', allowed)
+
+ if not scope:
+ scopes = allowed.keys()
+ else:
+ scopes = [scope]
+ scope_values = [s.value for s in scopes]
+
+ for evt_scope in scopes:
+ if event_source_type in allowed.get(evt_scope, []):
+ LOG.debug('Event Allowed: scope=%s EventType=%s',
+ evt_scope.value, event_source_type)
+ return True
+
+ LOG.debug('Event Denied: scopes=%s EventType=%s',
+ scope_values, event_source_type)
+ return False
+
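The precedence rule can be sketched with plain dicts, with strings standing in for the EventScope/EventType enums; user-specified events win over the datasource defaults:

    default_events = {"network": {"boot-new-instance"}}
    user_events = {"network": {"boot-new-instance", "boot"}}
    # Later entries win in a plain dict merge, which mirrors
    # mergemanydict's first-argument-wins ordering used above.
    allowed = {**default_events, **user_events}
    assert "boot" in allowed["network"]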
def _apply_netcfg_names(self, netcfg):
try:
LOG.debug("applying net config names for %s", netcfg)
@@ -709,27 +762,51 @@ class Init(object):
except Exception as e:
LOG.warning("Failed to rename devices: %s", e)
+ def _get_per_boot_network_semaphore(self):
+ return namedtuple('Semaphore', 'semaphore args')(
+ helpers.FileSemaphores(self.paths.get_runpath('sem')),
+ ('apply_network_config', PER_ONCE)
+ )
+
+ def _network_already_configured(self) -> bool:
+ sem = self._get_per_boot_network_semaphore()
+ return sem.semaphore.has_run(*sem.args)
+
def apply_network_config(self, bring_up):
- # get a network config
+ """Apply the network config.
+
+ Find the config, determine whether to apply it, apply it via
+ the distro, and optionally bring it up
+ """
netcfg, src = self._find_networking_config()
if netcfg is None:
LOG.info("network config is disabled by %s", src)
return
- # request an update if needed/available
- if self.datasource is not NULL_DATA_SOURCE:
- if not self.is_new_instance():
- if not self.datasource.update_metadata([EventType.BOOT]):
- LOG.debug(
- "No network config applied. Neither a new instance"
- " nor datasource network update on '%s' event",
- EventType.BOOT)
- # nothing new, but ensure proper names
- self._apply_netcfg_names(netcfg)
- return
- else:
- # refresh netcfg after update
- netcfg, src = self._find_networking_config()
+ def event_enabled_and_metadata_updated(event_type):
+ return self.update_event_enabled(
+ event_type, scope=EventScope.NETWORK
+ ) and self.datasource.update_metadata_if_supported([event_type])
+
+ def should_run_on_boot_event():
+ return (not self._network_already_configured() and
+ event_enabled_and_metadata_updated(EventType.BOOT))
+
+ if (
+ self.datasource is not NULL_DATA_SOURCE and
+ not self.is_new_instance() and
+ not should_run_on_boot_event() and
+ not event_enabled_and_metadata_updated(EventType.BOOT_LEGACY)
+ ):
+ LOG.debug(
+ "No network config applied. Neither a new instance"
+ " nor datasource network update allowed")
+ # nothing new, but ensure proper names
+ self._apply_netcfg_names(netcfg)
+ return
+
+ # refresh netcfg after update
+ netcfg, src = self._find_networking_config()
# ensure all physical devices in config are present
self.distro.networking.wait_for_physdevs(netcfg)
@@ -740,8 +817,12 @@ class Init(object):
# rendering config
LOG.info("Applying network configuration from %s bringup=%s: %s",
src, bring_up, netcfg)
+
+ sem = self._get_per_boot_network_semaphore()
try:
- return self.distro.apply_network_config(netcfg, bring_up=bring_up)
+ with sem.semaphore.lock(*sem.args):
+ return self.distro.apply_network_config(
+ netcfg, bring_up=bring_up)
except net.RendererNotFoundError as e:
LOG.error("Unable to render networking. Network config is "
"likely broken: %s", e)
@@ -989,6 +1070,8 @@ def _pkl_load(fname):
return None
try:
return pickle.loads(pickle_contents)
+ except sources.DatasourceUnpickleUserDataError:
+ return None
except Exception:
util.logexc(LOG, "Failed loading pickled blob from %s", fname)
return None
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 58f63b69..ccd56793 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -171,7 +171,7 @@ class CiTestCase(TestCase):
if self.with_logs:
# Remove the handler we setup
logging.getLogger().handlers = self.old_handlers
- logging.getLogger().level = None
+ logging.getLogger().setLevel(logging.NOTSET)
subp.subp = _real_subp
super(CiTestCase, self).tearDown()
@@ -360,6 +360,9 @@ class HttprettyTestCase(CiTestCase):
httpretty.HTTPretty.allow_net_connect = False
httpretty.reset()
httpretty.enable()
+ # Stop the logging from HttpPretty so our logs don't get mixed
+ # up with its logs
+ logging.getLogger('httpretty.core').setLevel(logging.CRITICAL)
def tearDown(self):
httpretty.disable()
diff --git a/cloudinit/tests/test_event.py b/cloudinit/tests/test_event.py
new file mode 100644
index 00000000..3da4c70c
--- /dev/null
+++ b/cloudinit/tests/test_event.py
@@ -0,0 +1,26 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Tests related to cloudinit.event module."""
+from cloudinit.event import EventType, EventScope, userdata_to_events
+
+
+class TestEvent:
+ def test_userdata_to_events(self):
+ userdata = {'network': {'when': ['boot']}}
+ expected = {EventScope.NETWORK: {EventType.BOOT}}
+ assert expected == userdata_to_events(userdata)
+
+ def test_invalid_scope(self, caplog):
+ userdata = {'networkasdfasdf': {'when': ['boot']}}
+ userdata_to_events(userdata)
+ assert (
+ "'networkasdfasdf' is not a valid EventScope! Update data "
+ "will be ignored for 'networkasdfasdf' scope"
+ ) in caplog.text
+
+ def test_invalid_event(self, caplog):
+ userdata = {'network': {'when': ['bootasdfasdf']}}
+ userdata_to_events(userdata)
+ assert (
+ "'bootasdfasdf' is not a valid EventType! Update data "
+ "will be ignored for 'network' scope"
+ ) in caplog.text
diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py
index d2d1b37f..a50836a4 100644
--- a/cloudinit/tests/test_stages.py
+++ b/cloudinit/tests/test_stages.py
@@ -1,7 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Tests related to cloudinit.stages module."""
-
import os
import stat
@@ -11,7 +10,7 @@ from cloudinit import stages
from cloudinit import sources
from cloudinit.sources import NetworkConfigSource
-from cloudinit.event import EventType
+from cloudinit.event import EventScope, EventType
from cloudinit.util import write_file
from cloudinit.tests.helpers import CiTestCase, mock
@@ -52,6 +51,8 @@ class TestInit(CiTestCase):
'distro': 'ubuntu', 'paths': {'cloud_dir': self.tmpdir,
'run_dir': self.tmpdir}}}
self.init.datasource = FakeDataSource(paths=self.init.paths)
+ self._real_is_new_instance = self.init.is_new_instance
+ self.init.is_new_instance = mock.Mock(return_value=True)
def test_wb__find_networking_config_disabled(self):
"""find_networking_config returns no config when disabled."""
@@ -291,6 +292,7 @@ class TestInit(CiTestCase):
m_macs.return_value = {'42:42:42:42:42:42': 'eth9'}
self.init._find_networking_config = fake_network_config
+
self.init.apply_network_config(True)
self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
self.init.distro.apply_network_config.assert_called_with(
@@ -299,6 +301,7 @@ class TestInit(CiTestCase):
@mock.patch('cloudinit.distros.ubuntu.Distro')
def test_apply_network_on_same_instance_id(self, m_ubuntu):
"""Only call distro.apply_network_config_names on same instance id."""
+ self.init.is_new_instance = self._real_is_new_instance
old_instance_id = os.path.join(
self.init.paths.get_cpath('data'), 'instance-id')
write_file(old_instance_id, TEST_INSTANCE_ID)
@@ -311,18 +314,19 @@ class TestInit(CiTestCase):
return net_cfg, NetworkConfigSource.fallback
self.init._find_networking_config = fake_network_config
+
self.init.apply_network_config(True)
self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
self.init.distro.apply_network_config.assert_not_called()
- self.assertIn(
- 'No network config applied. Neither a new instance'
- " nor datasource network update on '%s' event" % EventType.BOOT,
- self.logs.getvalue())
-
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
- @mock.patch('cloudinit.distros.ubuntu.Distro')
- def test_apply_network_on_datasource_allowed_event(self, m_ubuntu, m_macs):
- """Apply network if datasource.update_metadata permits BOOT event."""
+ assert (
+ "No network config applied. Neither a new instance nor datasource "
+ "network update allowed"
+ ) in self.logs.getvalue()
+
+ # CiTestCase doesn't work with pytest.mark.parametrize, and moving this
+ # functionality to a separate class is more cumbersome than it'd be worth
+ # at the moment, so use this as a simple setup
+ def _apply_network_setup(self, m_macs):
old_instance_id = os.path.join(
self.init.paths.get_cpath('data'), 'instance-id')
write_file(old_instance_id, TEST_INSTANCE_ID)
@@ -338,12 +342,80 @@ class TestInit(CiTestCase):
self.init._find_networking_config = fake_network_config
self.init.datasource = FakeDataSource(paths=self.init.paths)
- self.init.datasource.update_events = {'network': [EventType.BOOT]}
+ self.init.is_new_instance = mock.Mock(return_value=False)
+ return net_cfg
+
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch('cloudinit.distros.ubuntu.Distro')
+ @mock.patch.dict(sources.DataSource.default_update_events, {
+ EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}})
+ def test_apply_network_allowed_when_default_boot(
+ self, m_ubuntu, m_macs
+ ):
+ """Apply network if datasource permits BOOT event."""
+ net_cfg = self._apply_network_setup(m_macs)
+
self.init.apply_network_config(True)
- self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
+ assert mock.call(
+ net_cfg
+ ) == self.init.distro.apply_network_config_names.call_args_list[-1]
+ assert mock.call(
+ net_cfg, bring_up=True
+ ) == self.init.distro.apply_network_config.call_args_list[-1]
+
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch('cloudinit.distros.ubuntu.Distro')
+ @mock.patch.dict(sources.DataSource.default_update_events, {
+ EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}})
+ def test_apply_network_disabled_when_no_default_boot(
+ self, m_ubuntu, m_macs
+ ):
+ """Don't apply network if datasource has no BOOT event."""
+ self._apply_network_setup(m_macs)
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config.assert_not_called()
+ assert (
+ "No network config applied. Neither a new instance nor datasource "
+ "network update allowed"
+ ) in self.logs.getvalue()
+
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch('cloudinit.distros.ubuntu.Distro')
+ @mock.patch.dict(sources.DataSource.default_update_events, {
+ EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}})
+ def test_apply_network_allowed_with_userdata_overrides(
+ self, m_ubuntu, m_macs
+ ):
+ """Apply network if userdata overrides default config"""
+ net_cfg = self._apply_network_setup(m_macs)
+ self.init._cfg = {'updates': {'network': {'when': ['boot']}}}
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config_names.assert_called_with(
+ net_cfg)
self.init.distro.apply_network_config.assert_called_with(
net_cfg, bring_up=True)
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch('cloudinit.distros.ubuntu.Distro')
+ @mock.patch.dict(sources.DataSource.supported_update_events, {
+ EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}})
+ def test_apply_network_disabled_when_unsupported(
+ self, m_ubuntu, m_macs
+ ):
+ """Don't apply network config if unsupported.
+
+ Shouldn't work even when specified as userdata
+ """
+ self._apply_network_setup(m_macs)
+
+ self.init._cfg = {'updates': {'network': {'when': ['boot']}}}
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config.assert_not_called()
+ assert (
+ "No network config applied. Neither a new instance nor datasource "
+ "network update allowed"
+ ) in self.logs.getvalue()
+
class TestInit_InitializeFilesystem:
"""Tests for cloudinit.stages.Init._initialize_filesystem.
@@ -356,22 +428,20 @@ class TestInit_InitializeFilesystem:
"""A fixture which yields a stages.Init instance with paths and cfg set
As it is replaced with a mock, consumers of this fixture can set
- `init.cfg` if the default empty dict configuration is not appropriate.
+ `init._cfg` if the default empty dict configuration is not appropriate.
"""
- with mock.patch(
- "cloudinit.stages.Init.cfg", mock.PropertyMock(return_value={})
- ):
- with mock.patch("cloudinit.stages.util.ensure_dirs"):
- init = stages.Init()
- init._paths = paths
- yield init
+ with mock.patch("cloudinit.stages.util.ensure_dirs"):
+ init = stages.Init()
+ init._cfg = {}
+ init._paths = paths
+ yield init
@mock.patch("cloudinit.stages.util.ensure_file")
def test_ensure_file_not_called_if_no_log_file_configured(
self, m_ensure_file, init
):
"""If no log file is configured, we should not ensure its existence."""
- init.cfg = {}
+ init._cfg = {}
init._initialize_filesystem()
@@ -380,11 +450,13 @@ class TestInit_InitializeFilesystem:
def test_log_files_existence_is_ensured_if_configured(self, init, tmpdir):
"""If a log file is configured, we should ensure its existence."""
log_file = tmpdir.join("cloud-init.log")
- init.cfg = {"def_log_file": str(log_file)}
+ init._cfg = {"def_log_file": str(log_file)}
init._initialize_filesystem()
- assert log_file.exists
+ assert log_file.exists()
+ # Assert we create it 0o640 by default if it doesn't already exist
+ assert 0o640 == stat.S_IMODE(log_file.stat().mode)
def test_existing_file_permissions_are_not_modified(self, init, tmpdir):
"""If the log file already exists, we should not modify its permissions
@@ -397,7 +469,7 @@ class TestInit_InitializeFilesystem:
log_file = tmpdir.join("cloud-init.log")
log_file.ensure()
log_file.chmod(mode)
- init.cfg = {"def_log_file": str(log_file)}
+ init._cfg = {"def_log_file": str(log_file)}
init._initialize_filesystem()
diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py
index 364ec822..c3918f80 100644
--- a/cloudinit/tests/test_url_helper.py
+++ b/cloudinit/tests/test_url_helper.py
@@ -8,6 +8,7 @@ from cloudinit import util
from cloudinit import version
import httpretty
+import logging
import requests
@@ -81,6 +82,9 @@ class TestReadFileOrUrl(CiTestCase):
url = 'http://hostname/path'
headers = {'sensitive': 'sekret', 'server': 'blah'}
httpretty.register_uri(httpretty.GET, url)
+ # By default, httpretty will log our request along with the header,
+ # so if we don't change this the secret will show up in the logs
+ logging.getLogger('httpretty.core').setLevel(logging.CRITICAL)
read_file_or_url(url, headers=headers, headers_redact=['sensitive'])
logs = self.logs.getvalue()
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index a4c02877..9dd01158 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -124,6 +124,68 @@ OS_RELEASE_ALMALINUX_8 = dedent("""\
ALMALINUX_MANTISBT_PROJECT_VERSION="8.3"
""")
+OS_RELEASE_EUROLINUX_7 = dedent("""\
+ VERSION="7.9 (Minsk)"
+ ID="eurolinux"
+ ID_LIKE="rhel scientific centos fedora"
+ VERSION_ID="7.9"
+ PRETTY_NAME="EuroLinux 7.9 (Minsk)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:eurolinux:eurolinux:7.9:GA"
+ HOME_URL="http://www.euro-linux.com/"
+ BUG_REPORT_URL="mailto:support@euro-linux.com"
+ REDHAT_BUGZILLA_PRODUCT="EuroLinux 7"
+ REDHAT_BUGZILLA_PRODUCT_VERSION=7.9
+ REDHAT_SUPPORT_PRODUCT="EuroLinux"
+ REDHAT_SUPPORT_PRODUCT_VERSION="7.9"
+""")
+
+OS_RELEASE_EUROLINUX_8 = dedent("""\
+ NAME="EuroLinux"
+ VERSION="8.4 (Vaduz)"
+ ID="eurolinux"
+ ID_LIKE="rhel fedora centos"
+ VERSION_ID="8.4"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="EuroLinux 8.4 (Vaduz)"
+ ANSI_COLOR="0;34"
+ CPE_NAME="cpe:/o:eurolinux:eurolinux:8"
+ HOME_URL="https://www.euro-linux.com/"
+ BUG_REPORT_URL="https://github.com/EuroLinux/eurolinux-distro-bugs-and-rfc/"
+ REDHAT_SUPPORT_PRODUCT="EuroLinux"
+ REDHAT_SUPPORT_PRODUCT_VERSION="8"
+""")
+
+OS_RELEASE_ROCKY_8 = dedent("""\
+ NAME="Rocky Linux"
+ VERSION="8.3 (Green Obsidian)"
+ ID="rocky"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="8.3"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="Rocky Linux 8.3 (Green Obsidian)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:rocky:rocky:8"
+ HOME_URL="https://rockylinux.org/"
+ BUG_REPORT_URL="https://bugs.rockylinux.org/"
+ ROCKY_SUPPORT_PRODUCT="Rocky Linux"
+ ROCKY_SUPPORT_PRODUCT_VERSION="8"
+""")
+
+OS_RELEASE_VIRTUOZZO_8 = dedent("""\
+ NAME="Virtuozzo Linux"
+ VERSION="8"
+ ID="virtuozzo"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="8"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="Virtuozzo Linux"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:virtuozzoproject:vzlinux:8"
+ HOME_URL="https://www.vzlinux.org"
+ BUG_REPORT_URL="https://bugs.openvz.org"
+""")
+
REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)"
REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)"
REDHAT_RELEASE_REDHAT_6 = (
@@ -132,7 +194,12 @@ REDHAT_RELEASE_REDHAT_7 = (
"Red Hat Enterprise Linux Server release 7.5 (Maipo)")
REDHAT_RELEASE_ALMALINUX_8 = (
"AlmaLinux release 8.3 (Purple Manul)")
-
+REDHAT_RELEASE_EUROLINUX_7 = "EuroLinux release 7.9 (Minsk)"
+REDHAT_RELEASE_EUROLINUX_8 = "EuroLinux release 8.4 (Vaduz)"
+REDHAT_RELEASE_ROCKY_8 = (
+ "Rocky Linux release 8.3 (Green Obsidian)")
+REDHAT_RELEASE_VIRTUOZZO_8 = (
+ "Virtuozzo Linux release 8")
OS_RELEASE_DEBIAN = dedent("""\
PRETTY_NAME="Debian GNU/Linux 9 (stretch)"
@@ -160,6 +227,17 @@ OS_RELEASE_UBUNTU = dedent("""\
UBUNTU_CODENAME=xenial\n
""")
+OS_RELEASE_PHOTON = ("""\
+ NAME="VMware Photon OS"
+ VERSION="4.0"
+ ID=photon
+ VERSION_ID=4.0
+ PRETTY_NAME="VMware Photon OS/Linux"
+ ANSI_COLOR="1;34"
+ HOME_URL="https://vmware.github.io/photon/"
+ BUG_REPORT_URL="https://github.com/vmware/photon/issues"
+""")
+
class FakeCloud(object):
@@ -538,6 +616,70 @@ class TestGetLinuxDistro(CiTestCase):
self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist)
@mock.patch('cloudinit.util.load_file')
+ def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists):
+ """Verify eurolinux 7 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_7
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists):
+ """Verify eurolinux 7 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_EUROLINUX_7
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify eurolinux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists):
+ """Verify eurolinux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_EUROLINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify rocky linux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_ROCKY_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_rocky8_osrelease(self, m_os_release, m_path_exists):
+ """Verify rocky linux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_ROCKY_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_virtuozzo8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify virtuozzo linux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_VIRTUOZZO_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_virtuozzo8_osrelease(self, m_os_release, m_path_exists):
+ """Verify virtuozzo linux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_VIRTUOZZO_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
def test_get_linux_debian(self, m_os_release, m_path_exists):
"""Verify we get the correct name and release name on Debian."""
m_os_release.return_value = OS_RELEASE_DEBIAN
@@ -576,6 +718,15 @@ class TestGetLinuxDistro(CiTestCase):
self.assertEqual(
('opensuse-tumbleweed', '20180920', platform.machine()), dist)
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_photon_os_release(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and machine arch on PhotonOS"""
+ m_os_release.return_value = OS_RELEASE_PHOTON
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(
+ ('photon', '4.0', 'VMware Photon OS/Linux'), dist)
+
@mock.patch('platform.system')
@mock.patch('platform.dist', create=True)
def test_get_linux_distro_no_data(self, m_platform_dist,
diff --git a/cloudinit/util.py b/cloudinit/util.py
index fdea1181..c53f6453 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -35,6 +35,7 @@ from base64 import b64decode, b64encode
from errno import ENOENT
from functools import lru_cache
from urllib import parse
+from typing import List
from cloudinit import importer
from cloudinit import log as logging
@@ -392,7 +393,11 @@ def is_Linux():
@lru_cache()
def is_BSD():
- return 'BSD' in platform.system()
+ if 'BSD' in platform.system():
+ return True
+ if platform.system() == 'DragonFly':
+ return True
+ return False
@lru_cache()
@@ -401,6 +406,11 @@ def is_FreeBSD():
@lru_cache()
+def is_DragonFlyBSD():
+ return system_info()['variant'] == "dragonfly"
+
+
+@lru_cache()
def is_NetBSD():
return system_info()['variant'] == "netbsd"
@@ -444,9 +454,19 @@ def _parse_redhat_release(release_file=None):
redhat_regex = (
r'(?P<name>.+) release (?P<version>[\d\.]+) '
r'\((?P<codename>[^)]+)\)')
+
+ # Virtuozzo deviates here
+ if "Virtuozzo" in redhat_release:
+ redhat_regex = r'(?P<name>.+) release (?P<version>[\d\.]+)'
+
match = re.match(redhat_regex, redhat_release)
if match:
group = match.groupdict()
+
+ # Virtuozzo has no codename in this file
+ if "Virtuozzo" in group['name']:
+ group['codename'] = group['name']
+
group['name'] = group['name'].lower().partition(' linux')[0]
if group['name'] == 'red hat enterprise':
group['name'] = 'redhat'
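The two patterns can be exercised directly against the release strings used by the new tests:

    import re

    redhat_regex = (r'(?P<name>.+) release (?P<version>[\d\.]+) '
                    r'\((?P<codename>[^)]+)\)')
    m = re.match(redhat_regex, "Rocky Linux release 8.3 (Green Obsidian)")
    assert m.groupdict()["codename"] == "Green Obsidian"

    # Virtuozzo's file carries no "(codename)", hence the relaxed pattern:
    m = re.match(r'(?P<name>.+) release (?P<version>[\d\.]+)',
                 "Virtuozzo Linux release 8")
    assert m.group("version") == "8"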
@@ -461,9 +481,11 @@ def get_linux_distro():
distro_version = ''
flavor = ''
os_release = {}
+ os_release_rhel = False
if os.path.exists('/etc/os-release'):
os_release = load_shell_content(load_file('/etc/os-release'))
if not os_release:
+ os_release_rhel = True
os_release = _parse_redhat_release()
if os_release:
distro_name = os_release.get('ID', '')
@@ -474,6 +496,11 @@ def get_linux_distro():
# which will include both version codename and architecture
# on all distributions.
flavor = platform.machine()
+ elif distro_name == 'photon':
+ flavor = os_release.get('PRETTY_NAME', '')
+ elif distro_name == 'virtuozzo' and not os_release_rhel:
+        # Only use PRETTY_NAME when /etc/redhat-release was not the source
+ flavor = os_release.get('PRETTY_NAME', '')
else:
flavor = os_release.get('VERSION_CODENAME', '')
if not flavor:
@@ -521,8 +548,8 @@ def system_info():
if system == "linux":
linux_dist = info['dist'][0].lower()
if linux_dist in (
- 'almalinux', 'alpine', 'arch', 'centos', 'debian', 'fedora',
- 'rhel', 'suse'):
+ 'almalinux', 'alpine', 'arch', 'centos', 'debian', 'eurolinux',
+ 'fedora', 'photon', 'rhel', 'rocky', 'suse', 'virtuozzo'):
var = linux_dist
elif linux_dist in ('ubuntu', 'linuxmint', 'mint'):
var = 'ubuntu'
@@ -534,7 +561,9 @@ def system_info():
var = 'suse'
else:
var = 'linux'
- elif system in ('windows', 'darwin', "freebsd", "netbsd", "openbsd"):
+ elif system in (
+ 'windows', 'darwin', "freebsd", "netbsd",
+ "openbsd", "dragonfly"):
var = system
info['variant'] = var
@@ -1195,6 +1224,23 @@ def find_devs_with_openbsd(criteria=None, oformat='device',
return ['/dev/' + i for i in devlist]
+def find_devs_with_dragonflybsd(criteria=None, oformat='device',
+ tag=None, no_cache=False, path=None):
+ out, _err = subp.subp(['sysctl', '-n', 'kern.disks'], rcs=[0])
+ devlist = [i for i in sorted(out.split(), reverse=True)
+ if not i.startswith("md") and not i.startswith("vn")]
+
+ if criteria == "TYPE=iso9660":
+ devlist = [i for i in devlist
+ if i.startswith('cd') or i.startswith('acd')]
+ elif criteria in ["LABEL=CONFIG-2", "TYPE=vfat"]:
+ devlist = [i for i in devlist
+ if not (i.startswith('cd') or i.startswith('acd'))]
+ elif criteria:
+ LOG.debug("Unexpected criteria: %s", criteria)
+ return ['/dev/' + i for i in devlist]
+
+
def find_devs_with(criteria=None, oformat='device',
tag=None, no_cache=False, path=None):
"""
@@ -1213,6 +1259,9 @@ def find_devs_with(criteria=None, oformat='device',
elif is_OpenBSD():
return find_devs_with_openbsd(criteria, oformat,
tag, no_cache, path)
+ elif is_DragonFlyBSD():
+ return find_devs_with_dragonflybsd(criteria, oformat,
+ tag, no_cache, path)
blk_id_cmd = ['blkid']
options = []
@@ -1830,6 +1879,53 @@ def chmod(path, mode):
os.chmod(path, real_mode)
+def get_permissions(path: str) -> int:
+ """
+ Returns the octal permissions of the file/folder pointed by the path,
+ encoded as an int.
+
+ @param path: The full path of the file/folder.
+ """
+
+ return stat.S_IMODE(os.stat(path).st_mode)
+
+
+def get_owner(path: str) -> str:
+ """
+ Returns the owner of the file/folder pointed by the path.
+
+ @param path: The full path of the file/folder.
+ """
+ st = os.stat(path)
+ return pwd.getpwuid(st.st_uid).pw_name
+
+
+def get_group(path: str) -> str:
+ """
+ Returns the group of the file/folder pointed by the path.
+
+ @param path: The full path of the file/folder.
+ """
+ st = os.stat(path)
+ return grp.getgrgid(st.st_gid).gr_name
+
+
+def get_user_groups(username: str) -> List[str]:
+ """
+ Returns a list of all groups to which the user belongs
+
+ @param username: the user we want to check
+ """
+ groups = []
+ for group in grp.getgrall():
+ if username in group.gr_mem:
+ groups.append(group.gr_name)
+
+ gid = pwd.getpwnam(username).pw_gid
+ groups.append(grp.getgrgid(gid).gr_name)
+ return groups
+
+
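These helpers are thin wrappers over os.stat plus the pwd/grp databases. A standalone sketch of what they return (Linux-specific; values vary by host):

    import grp
    import os
    import pwd
    import stat

    st = os.stat("/etc/hostname")
    mode = stat.S_IMODE(st.st_mode)           # e.g. 0o644
    owner = pwd.getpwuid(st.st_uid).pw_name   # e.g. 'root'
    group = grp.getgrgid(st.st_gid).gr_name   # e.g. 'root'
    print(oct(mode), owner, group)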
def write_file(
filename,
content,
@@ -1856,8 +1952,7 @@ def write_file(
if preserve_mode:
try:
- file_stat = os.stat(filename)
- mode = stat.S_IMODE(file_stat.st_mode)
+ mode = get_permissions(filename)
except OSError:
pass
@@ -2211,6 +2306,14 @@ def find_freebsd_part(fs):
LOG.warning("Unexpected input in find_freebsd_part: %s", fs)
+def find_dragonflybsd_part(fs):
+ splitted = fs.split('/')
+ if len(splitted) == 3 and splitted[1] == 'dev':
+ return splitted[2]
+ else:
+ LOG.warning("Unexpected input in find_dragonflybsd_part: %s", fs)
+
+
def get_path_dev_freebsd(path, mnt_list):
path_found = None
for line in mnt_list.split("\n"):
@@ -2264,6 +2367,9 @@ def parse_mount(path):
# https://regex101.com/r/T2en7a/1
regex = (r'^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) '
r'(?=(?:type)[\s]+([\S]+)|\(([^,]*))')
+ if is_DragonFlyBSD():
+ regex = (r'^(/dev/[\S]+|\S*?) on (/[\S]*) '
+ r'(?=(?:type)[\s]+([\S]+)|\(([^,]*))')
for line in mount_locs:
m = re.search(regex, line)
if not m:
diff --git a/cloudinit/version.py b/cloudinit/version.py
index be47aff3..b798a6d7 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "21.2"
+__VERSION__ = "21.3"
_PACKAGED_VERSION = '@@PACKAGED_VERSION@@'
FEATURES = [
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index 8656daa7..825deff4 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -1,8 +1,8 @@
## template:jinja
# The top level settings are used as module
# and system configuration.
-
-{% if variant.endswith("bsd") %}
+{% set is_bsd = variant in ["dragonfly", "freebsd", "netbsd", "openbsd"] %}
+{% if is_bsd %}
syslog_fix_perms: root:wheel
{% elif variant in ["suse"] %}
syslog_fix_perms: root:root
@@ -11,17 +11,29 @@ syslog_fix_perms: root:root
# when a 'default' entry is found it will reference the 'default_user'
# from the distro configuration specified below
users:
+{% if variant in ["photon"] %}
+ - name: root
+ lock_passwd: false
+{% else %}
- default
+{% endif %}
+
+{% if variant in ["photon"] %}
+# VMware guest customization.
+disable_vmware_customization: true
+manage_etc_hosts: false
+{% endif %}
# If this is set, 'root' will not be able to ssh in and they
# will get a message to login instead as the default $user
-{% if variant in ["freebsd"] %}
+{% if variant in ["freebsd", "photon"] %}
disable_root: false
{% else %}
disable_root: true
{% endif %}
-{% if variant in ["almalinux", "alpine", "amazon", "centos", "fedora", "rhel"] %}
+{% if variant in ["almalinux", "alpine", "amazon", "centos", "eurolinux",
+ "fedora", "rhel", "rocky", "virtuozzo"] %}
mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
{% if variant == "amazon" %}
resize_rootfs: noblock
@@ -33,6 +45,8 @@ ssh_pwauth: 0
# This will cause the set+update hostname module to not operate (if true)
preserve_hostname: false
+# If you use the datasource_list array, keep its items on a single line.
+# If you use a multi-line array, the ds-identify script won't read the items.
{% if variant.endswith("bsd") %}
# This should not be required, but leave it in place until the real cause of
# not finding -any- datasources is resolved.
@@ -60,22 +74,24 @@ cloud_init_modules:
{% endif %}
- bootcmd
- write-files
-{% if variant not in ["netbsd"] %}
+{% if variant not in ["netbsd", "openbsd"] %}
- growpart
- resizefs
{% endif %}
-{% if variant not in ["freebsd", "netbsd"] %}
+{% if not is_bsd %}
- disk_setup
- mounts
{% endif %}
- set_hostname
- update_hostname
- update_etc_hosts
-{% if variant in ["alpine"] %}
+{% if variant in ["alpine", "photon"] %}
- resolv_conf
{% endif %}
{% if not variant.endswith("bsd") %}
+{% if variant not in ["photon"] %}
- ca-certs
+{% endif %}
- rsyslog
{% endif %}
- users-groups
@@ -89,11 +105,15 @@ cloud_config_modules:
- emit_upstart
- snap
{% endif %}
+{% if variant not in ["photon"] %}
- ssh-import-id
- locale
+{% endif %}
- set-passwords
-{% if variant in ["rhel", "fedora"] %}
+{% if variant in ["rhel", "fedora", "photon"] %}
+{% if variant not in ["photon"] %}
- spacewalk
+{% endif %}
- yum-add-repo
{% endif %}
{% if variant in ["ubuntu", "unknown", "debian"] %}
@@ -154,9 +174,11 @@ cloud_final_modules:
system_info:
# This will affect which distro class gets used
{% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "debian",
- "fedora", "freebsd", "netbsd", "openbsd", "rhel",
- "suse", "ubuntu"] %}
+ "eurolinux", "fedora", "freebsd", "netbsd", "openbsd",
+ "photon", "rhel", "rocky", "suse", "ubuntu", "virtuozzo"] %}
distro: {{ variant }}
+{% elif variant in ["dragonfly"] %}
+ distro: dragonflybsd
{% else %}
# Unknown/fallback distro.
distro: ubuntu
@@ -206,8 +228,8 @@ system_info:
primary: http://ports.ubuntu.com/ubuntu-ports
security: http://ports.ubuntu.com/ubuntu-ports
ssh_svcname: ssh
-{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "fedora",
- "rhel", "suse"] %}
+{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "eurolinux",
+ "fedora", "rhel", "rocky", "suse", "virtuozzo"] %}
# Default user name + that default users groups (if added/used)
default_user:
{% if variant == "amazon" %}
@@ -248,6 +270,15 @@ system_info:
groups: [wheel]
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
shell: /bin/tcsh
+{% elif variant in ["dragonfly"] %}
+ # Default user name + that default users groups (if added/used)
+ default_user:
+ name: dragonfly
+ lock_passwd: True
+ gecos: DragonFly
+ groups: [wheel]
+ sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+ shell: /bin/sh
{% elif variant in ["netbsd"] %}
default_user:
name: netbsd
@@ -264,8 +295,32 @@ system_info:
groups: [wheel]
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
shell: /bin/ksh
+{% elif variant == "photon" %}
+ default_user:
+ name: photon
+ lock_passwd: True
+ gecos: PhotonOS
+ groups: [wheel]
+ sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+ shell: /bin/bash
+ # Other config here will be given to the distro class and/or path classes
+ paths:
+ cloud_dir: /var/lib/cloud/
+ templates_dir: /etc/cloud/templates/
+ network:
+ renderers: ['networkd']
+
+ ssh_svcname: sshd
+
+ # If set to true, cloud-init will not use fallback network config.
+ # Photon ships with default network settings, so when network settings are
+ # not explicitly given in metadata, the fallback network config is unneeded.
+ disable_fallback_netcfg: true
{% endif %}
{% if variant in ["freebsd", "netbsd", "openbsd"] %}
network:
renderers: ['{{ variant }}']
+{% elif variant in ["dragonfly"] %}
+ network:
+ renderers: ['freebsd']
{% endif %}
diff --git a/doc/examples/cloud-config-apt.txt b/doc/examples/cloud-config-apt.txt
index 004894b7..f4392326 100644
--- a/doc/examples/cloud-config-apt.txt
+++ b/doc/examples/cloud-config-apt.txt
@@ -138,6 +138,12 @@ apt:
# the first defining a valid mirror wins (in the order as defined here,
# not the order as listed in the config).
#
+ # Additionally, if the repository requires a custom signing key, it can be
+ # specified via the same fields as for custom sources:
+ # 'keyid': providing a key to import via shortid or fingerprint
+ # 'key': providing a raw PGP key
+ # 'keyserver': specify an alternate keyserver from which to pull keys
+ # specified by keyid
- arches: [s390x, arm64]
# as above, allowing to have one config for different per arch mirrors
# security is optional, if not defined it is set to the same value as primary
diff --git a/doc/examples/cloud-config-puppet.txt b/doc/examples/cloud-config-puppet.txt
index 3c7e2da7..c6bc15de 100644
--- a/doc/examples/cloud-config-puppet.txt
+++ b/doc/examples/cloud-config-puppet.txt
@@ -1,25 +1,65 @@
#cloud-config
#
-# This is an example file to automatically setup and run puppetd
+# This is an example file to automatically setup and run puppet
# when the instance boots for the first time.
# Make sure that this file is valid yaml before starting instances.
# It should be passed as user-data when starting the instance.
puppet:
+ # Boolean: whether or not to install puppet (default: true)
+ install: true
+
+ # A specific version to pass to the installer script or package manager
+ version: "7.7.0"
+
+ # Valid values are 'packages' and 'aio' (default: 'packages')
+ install_type: "packages"
+
+ # Puppet collection to install if 'install_type' is 'aio'
+ collection: "puppet7"
+
+ # Boolean: whether or not to remove the puppetlabs repo after installation
+ # if 'install_type' is 'aio' (default: true)
+ cleanup: true
+
+ # If 'install_type' is 'aio', change the url to the install script
+ aio_install_url: "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh"
+
+ # Path to the puppet config file (default: depends on 'install_type')
+ conf_file: "/etc/puppet/puppet.conf"
+
+ # Path to the puppet SSL directory (default: depends on 'install_type')
+ ssl_dir: "/var/lib/puppet/ssl"
+
+ # Path to the CSR attributes file (default: depends on 'install_type')
+ csr_attributes_path: "/etc/puppet/csr_attributes.yaml"
+
+ # The name of the puppet package to install (no-op if 'install_type' is 'aio')
+ package_name: "puppet"
+
+ # Boolean: whether or not to run puppet after configuration finishes
+ # (default: false)
+ exec: false
+
+ # A list of arguments to pass to 'puppet agent' if 'exec' is true
+ # (default: ['--test'])
+ exec_args: ['--test']
+
# Every key present in the conf object will be added to puppet.conf:
# [name]
# subkey=value
#
# For example the configuration below will have the following section
# added to puppet.conf:
- # [puppetd]
- # server=puppetmaster.example.org
+ # [main]
+ # server=puppetserver.example.org
# certname=i-0123456.ip-X-Y-Z.cloud.internal
#
- # The puppmaster ca certificate will be available in
- # /var/lib/puppet/ssl/certs/ca.pem
+ # The puppetserver ca certificate will be available in
+ # /var/lib/puppet/ssl/certs/ca.pem if using distro packages
+ # or /etc/puppetlabs/puppet/ssl/certs/ca.pem if using AIO packages.
conf:
agent:
- server: "puppetmaster.example.org"
+ server: "puppetserver.example.org"
# certname supports substitutions at runtime:
# %i: instanceid
# Example: i-0123456
@@ -29,11 +69,13 @@ puppet:
# NB: the certname will automatically be lowercased as required by puppet
certname: "%i.%f"
# ca_cert is a special case. It won't be added to puppet.conf.
- # It holds the puppetmaster certificate in pem format.
+ # It holds the puppetserver certificate in pem format.
# It should be a multi-line string (using the | yaml notation for
# multi-line strings).
- # The puppetmaster certificate is located in
- # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host.
+ # The puppetserver certificate is located in
+ # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetserver host if using
+ # distro packages or /etc/puppetlabs/puppet/ssl/ca/ca_crt.pem if using AIO
+ # packages.
#
ca_cert: |
-----BEGIN CERTIFICATE-----
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
index 4a5a7e20..1faecf75 100644
--- a/doc/examples/cloud-config-user-groups.txt
+++ b/doc/examples/cloud-config-user-groups.txt
@@ -19,7 +19,7 @@ users:
primary_group: foobar
groups: users
selinux_user: staff_u
- expiredate: '2012-09-01'
+ expiredate: '2032-09-01'
ssh_import_id: foobar
lock_passwd: false
passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index 10e8228f..67d6a9e3 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -27,7 +27,7 @@ Getting help
Having trouble? We would like to help!
- Try the :ref:`FAQ` – it's got answers to some common questions
-- Ask a question in the ``#cloud-init`` IRC channel on Freenode
+- Ask a question in the ``#cloud-init`` IRC channel on Libera
- Join and ask questions on the `cloud-init mailing list <https://launchpad.net/~cloud-init>`_
- Find a bug? `Report bugs on Launchpad <https://bugs.launchpad.net/cloud-init/+filebug>`_
@@ -49,6 +49,7 @@ Having trouble? We would like to help!
topics/format.rst
topics/examples.rst
+ topics/events.rst
topics/modules.rst
topics/merging.rst
diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst
index f3e13edc..d8ca9d16 100644
--- a/doc/rtd/topics/availability.rst
+++ b/doc/rtd/topics/availability.rst
@@ -14,18 +14,20 @@ distributions and clouds, both public and private.
Distributions
=============
-Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD
-and OpenBSD:
+Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD,
+OpenBSD and DragonFlyBSD:
- Alpine Linux
- ArchLinux
- Debian
+- DragonFlyBSD
- Fedora
- FreeBSD
- Gentoo Linux
- NetBSD
- OpenBSD
-- RHEL/CentOS
+- Photon OS
+- RHEL/CentOS/AlmaLinux/Rocky Linux/EuroLinux
- SLES/openSUSE
- Ubuntu
@@ -42,7 +44,7 @@ environments in the public cloud:
- Softlayer
- Rackspace Public Cloud
- IBM Cloud
-- Digital Ocean
+- DigitalOcean
- Bigstep
- Hetzner
- Joyent
@@ -57,6 +59,7 @@ environments in the public cloud:
- SmartOS
- UpCloud
- Vultr
+- Zadara Edge Cloud Platform
Additionally, cloud-init is supported on these private clouds:
@@ -65,5 +68,6 @@ Additionally, cloud-init is supported on these private clouds:
- LXD
- KVM
- Metal-as-a-Service (MAAS)
+- VMware
.. vi: textwidth=79
diff --git a/doc/rtd/topics/cli.rst b/doc/rtd/topics/cli.rst
index 0ff230b5..b6115ed6 100644
--- a/doc/rtd/topics/cli.rst
+++ b/doc/rtd/topics/cli.rst
@@ -119,6 +119,10 @@ Current subcommands:
schema errors locally without the need for deployment. Schema
validation is work in progress and supports a subset of cloud-config
modules.
+ * ``hotplug-hook``: respond to newly added system devices by retrieving
+ updated system metadata and bringing up/down the corresponding device.
+ This command is intended to be called via a systemd service and is
+ not considered user-accessible except for debugging purposes.
.. _cli_features:
diff --git a/doc/rtd/topics/code_review.rst b/doc/rtd/topics/code_review.rst
index 68c10405..33aad789 100644
--- a/doc/rtd/topics/code_review.rst
+++ b/doc/rtd/topics/code_review.rst
@@ -22,7 +22,7 @@ questions about the code review process, or at any point during the
code review process, these are the available avenues:
* if you have an open Pull Request, comment on that pull request
-* join the ``#cloud-init`` channel on the Freenode IRC network and ask
+* join the ``#cloud-init`` channel on the Libera IRC network and ask
away
* send an email to the cloud-init mailing list,
cloud-init@lists.launchpad.net
@@ -58,12 +58,12 @@ Reviewer
Committer
A cloud-init core developer (i.e. a person who has permission to
- merge PRs into master).
+ merge PRs into **main**).
Prerequisites For Landing Pull Requests
=======================================
-Before a PR can be landed into master, the following conditions *must*
+Before a PR can be landed into **main**, the following conditions *must*
be met:
* the CLA has been signed by the **Proposer** (or is covered by an
@@ -148,7 +148,7 @@ temporarily closed. (The first two are covered in this section; see
(In the below, when the verbs "merge" or "squash merge" are used, they
should be understood to mean "squash merged using the GitHub UI", which
-is the only way that changes can land in cloud-init's master branch.)
+is the only way that changes can land in cloud-init's **main** branch.)
These are the steps that comprise the review phase:
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 497b1467..f5aee1c2 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -50,6 +50,7 @@ The following is a list of documents for each supported datasource:
datasources/upcloud.rst
datasources/zstack.rst
datasources/vultr.rst
+ datasources/vmware.rst
Creation
========
diff --git a/doc/rtd/topics/datasources/digitalocean.rst b/doc/rtd/topics/datasources/digitalocean.rst
index 88f1e5f5..a4910408 100644
--- a/doc/rtd/topics/datasources/digitalocean.rst
+++ b/doc/rtd/topics/datasources/digitalocean.rst
@@ -1,7 +1,7 @@
.. _datasource_digital_ocean:
-Digital Ocean
-=============
+DigitalOcean
+============
The `DigitalOcean`_ datasource consumes the content served from DigitalOcean's
`metadata service`_. This metadata service serves information about the
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
index edb41e2a..dbe21834 100644
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ b/doc/rtd/topics/datasources/nocloud.rst
@@ -50,6 +50,8 @@ These user-data and meta-data files are expected to be in the following format.
/user-data
/meta-data
+Both files must be present for the image to be considered a valid seed ISO.
+
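+As a sketch of how such a seed image is commonly built (assuming the
+``genisoimage`` tool is available), both files can be packed into an ISO
+whose volume label ``cidata`` is what cloud-init searches for:
+
+.. code-block:: python
+
+   import subprocess
+
+   # Pack user-data and meta-data into a NoCloud seed ISO labeled "cidata".
+   subprocess.run(
+       ["genisoimage", "-output", "seed.iso", "-volid", "cidata",
+        "-joliet", "-rock", "user-data", "meta-data"],
+       check=True,
+   )
+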
Basically, user-data is passed through as-is, while meta-data is a YAML
formatted file representing what you'd find in the EC2 metadata service.
diff --git a/doc/rtd/topics/datasources/ovf.rst b/doc/rtd/topics/datasources/ovf.rst
index 85b0c377..bd5df860 100644
--- a/doc/rtd/topics/datasources/ovf.rst
+++ b/doc/rtd/topics/datasources/ovf.rst
@@ -18,6 +18,10 @@ configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
The settings that may be configured are:
+ * disable_vmware_customization: disable or enable the vmware customization
+ based on vmware customization files. (default: True)
+ * allow_raw_data: enable or disable the vmware customization based on raw
+ cloud-init data including metadata and userdata. (default: True)
* vmware_cust_file_max_wait: the maximum amount of clock time in seconds that
should be spent waiting for vmware customization files. (default: 15)
@@ -35,7 +39,7 @@ The following VMTools configuration options affect cloud-init's behavior on a bo
change this default behavior (for example: enabled by default) via
customization specification settings.
-VMWare admin can refer to (https://github.com/canonical/cloud-init/blob/master/cloudinit/sources/helpers/vmware/imc/config.py) and set the customization specification settings.
+VMware admins can refer to (https://github.com/canonical/cloud-init/blob/main/cloudinit/sources/helpers/vmware/imc/config.py) and set the customization specification settings.
For more information, see [VMware vSphere Product Documentation](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-9A5093A5-C54F-4502-941B-3F9C0F573A39.html) and specific VMTools parameters consumed.
diff --git a/doc/rtd/topics/datasources/vmware.rst b/doc/rtd/topics/datasources/vmware.rst
new file mode 100644
index 00000000..996eb61f
--- /dev/null
+++ b/doc/rtd/topics/datasources/vmware.rst
@@ -0,0 +1,359 @@
+.. _datasource_vmware:
+
+VMware
+======
+
+This datasource is for use with systems running on a VMware platform such as
+vSphere and currently supports the following data transports:
+
+
+* `GuestInfo <https://github.com/vmware/govmomi/blob/master/govc/USAGE.md#vmchange>`_ keys
+
+Configuration
+-------------
+
+The configuration method is dependent upon the transport:
+
+GuestInfo Keys
+^^^^^^^^^^^^^^
+
+One method of providing meta, user, and vendor data is by setting the following
+key/value pairs on a VM's ``extraConfig`` `property <https://vdc-repo.vmware.com/vmwb-repository/dcr-public/723e7f8b-4f21-448b-a830-5f22fd931b01/5a8257bd-7f41-4423-9a73-03307535bd42/doc/vim.vm.ConfigInfo.html>`_ :
+
+.. list-table::
+ :header-rows: 1
+
+ * - Property
+ - Description
+ * - ``guestinfo.metadata``
+ - A YAML or JSON document containing the cloud-init metadata.
+ * - ``guestinfo.metadata.encoding``
+ - The encoding type for ``guestinfo.metadata``.
+ * - ``guestinfo.userdata``
+ - A YAML document containing the cloud-init user data.
+ * - ``guestinfo.userdata.encoding``
+ - The encoding type for ``guestinfo.userdata``.
+ * - ``guestinfo.vendordata``
+ - A YAML document containing the cloud-init vendor data.
+ * - ``guestinfo.vendordata.encoding``
+ - The encoding type for ``guestinfo.vendordata``.
+
+
+All ``guestinfo.*.encoding`` values may be set to ``base64`` or
+``gzip+base64``.
+
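+For instance, a minimal Python sketch (assuming a ``metadata.yaml`` file on
+disk) shows how a document is prepared for ``guestinfo.metadata`` with the
+``gzip+base64`` encoding:
+
+.. code-block:: python
+
+   import base64
+   import gzip
+
+   # Compress and base64-encode the document. The printed string is the
+   # value for guestinfo.metadata, with guestinfo.metadata.encoding set
+   # to "gzip+base64".
+   with open("metadata.yaml", "rb") as f:
+       encoded = base64.b64encode(gzip.compress(f.read())).decode()
+   print(encoded)
+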
+Features
+--------
+
+This section reviews several features available in this datasource, regardless
+of how the meta, user, and vendor data was discovered.
+
+Instance data and lazy networks
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+One of the hallmarks of cloud-init is `its use of instance-data and JINJA
+queries <../instancedata.html#using-instance-data>`_
+-- the ability to write queries in user and vendor data that reference runtime
+information present in ``/run/cloud-init/instance-data.json``. This works well
+when the metadata provides all of the information up front, such as the network
+configuration. For systems that rely on DHCP, however, this information may not
+be available when the metadata is persisted to disk.
+
+This datasource ensures that even if the instance is using DHCP to configure
+networking, the same details about the configured network are available in
+``/run/cloud-init/instance-data.json`` as if static networking was used. This
+information collected at runtime is easy to demonstrate by executing the
+datasource on the command line. From the root of this repository, run the
+following command:
+
+.. code-block:: bash
+
+ PYTHONPATH="$(pwd)" python3 cloudinit/sources/DataSourceVMware.py
+
+The above command will result in output similar to the JSON below:
+
+.. code-block:: json
+
+ {
+ "hostname": "akutz.localhost",
+ "local-hostname": "akutz.localhost",
+ "local-ipv4": "192.168.0.188",
+ "local_hostname": "akutz.localhost",
+ "network": {
+ "config": {
+ "dhcp": true
+ },
+ "interfaces": {
+ "by-ipv4": {
+ "172.0.0.2": {
+ "netmask": "255.255.255.255",
+ "peer": "172.0.0.2"
+ },
+ "192.168.0.188": {
+ "broadcast": "192.168.0.255",
+ "mac": "64:4b:f0:18:9a:21",
+ "netmask": "255.255.255.0"
+ }
+ },
+ "by-ipv6": {
+ "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2": {
+ "flags": 208,
+ "mac": "64:4b:f0:18:9a:21",
+ "netmask": "ffff:ffff:ffff:ffff::/64"
+ }
+ },
+ "by-mac": {
+ "64:4b:f0:18:9a:21": {
+ "ipv4": [
+ {
+ "addr": "192.168.0.188",
+ "broadcast": "192.168.0.255",
+ "netmask": "255.255.255.0"
+ }
+ ],
+ "ipv6": [
+ {
+ "addr": "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2",
+ "flags": 208,
+ "netmask": "ffff:ffff:ffff:ffff::/64"
+ }
+ ]
+ },
+ "ac:de:48:00:11:22": {
+ "ipv6": []
+ }
+ }
+ }
+ },
+ "wait-on-network": {
+ "ipv4": true,
+ "ipv6": "false"
+ }
+ }
+
+
+Redacting sensitive information
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Sometimes the cloud-init userdata might contain sensitive information, and it
+may be desirable to have the ``guestinfo.userdata`` key (or other guestinfo
+keys) redacted as soon as its data is read by the datasource. This is possible
+by adding the following to the metadata:
+
+.. code-block:: yaml
+
+ redact: # formerly named cleanup-guestinfo, which will also work
+ - userdata
+ - vendordata
+
+When the above snippet is added to the metadata, the datasource will iterate
+over the elements in the ``redact`` array and clear each of the keys. For
+example, when the guestinfo transport is used, the above snippet will cause
+the following commands to be executed:
+
+.. code-block:: shell
+
+ vmware-rpctool "info-set guestinfo.userdata ---"
+ vmware-rpctool "info-set guestinfo.userdata.encoding "
+ vmware-rpctool "info-set guestinfo.vendordata ---"
+ vmware-rpctool "info-set guestinfo.vendordata.encoding "
+
+Please note that keys are set to the valid YAML string ``---`` as it is not
+possible to remove an existing key from the guestinfo key-space. A key's analogous
+encoding property will be set to a single white-space character, causing the
+datasource to treat the actual key value as plain-text, thereby loading it as
+an empty YAML doc (hence the aforementioned ``---``\ ).
+
+Reading the local IP addresses
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This datasource automatically discovers the local IPv4 and IPv6 addresses for
+a guest operating system based on the default routes. However, when inspecting
+a VM externally, it's not possible to know what the *default* IP address is for
+the guest OS. That's why this datasource sets the discovered, local IPv4 and
+IPv6 addresses back in the guestinfo namespace as the following keys:
+
+
+* ``guestinfo.local-ipv4``
+* ``guestinfo.local-ipv6``
+
+It is possible that a host may not have any default local IP addresses, and
+the reported addresses may be link-local. Even so, these two keys can be used
+to discover what this datasource determined were the local IPv4 and IPv6
+addresses for a host.
+
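+From inside the guest, these keys can be read back for verification. The
+sketch below assumes the ``vmware-rpctool`` utility from open-vm-tools is
+installed:
+
+.. code-block:: python
+
+   import subprocess
+
+   # Read back the keys the datasource published; vmware-rpctool takes the
+   # whole RPC command as a single argument.
+   for key in ("guestinfo.local-ipv4", "guestinfo.local-ipv6"):
+       result = subprocess.run(
+           ["vmware-rpctool", "info-get " + key],
+           capture_output=True, text=True,
+       )
+       print(key, "=", result.stdout.strip())
+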
+Waiting on the network
+^^^^^^^^^^^^^^^^^^^^^^
+
+Sometimes cloud-init may bring up the network, but it will not finish coming
+online before the datasource's ``setup`` function is called, resulting in a
+``/var/run/cloud-init/instance-data.json`` file that does not have the correct
+network information. It is possible to instruct the datasource to wait until an
+IPv4 or IPv6 address is available before writing the instance data with the
+following metadata properties:
+
+.. code-block:: yaml
+
+ wait-on-network:
+ ipv4: true
+ ipv6: true
+
+If either of the above values is true, then the datasource will sleep for a
+second, check the network status, and repeat until one or both addresses from
+the specified families are available.
+
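+Conceptually, the wait is a simple poll loop. The sketch below illustrates
+the behavior; ``has_default_addr`` is a hypothetical stand-in for the
+datasource's actual address-discovery logic:
+
+.. code-block:: python
+
+   import time
+
+   def has_default_addr(family: str) -> bool:
+       # Hypothetical placeholder for the datasource's address discovery.
+       return True
+
+   def wait_on_network(wait_ipv4: bool, wait_ipv6: bool) -> None:
+       # Sleep for a second between checks until each requested address
+       # family reports a default local address.
+       while True:
+           if (not wait_ipv4 or has_default_addr("ipv4")) and \
+              (not wait_ipv6 or has_default_addr("ipv6")):
+               return
+           time.sleep(1)
+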
+Walkthrough
+-----------
+
+The following series of steps demonstrates how to configure a VM with
+this datasource:
+
+
+#. Create the metadata file for the VM. Save the following YAML to a file named
+ ``metadata.yaml``\ :
+
+ .. code-block:: yaml
+
+ instance-id: cloud-vm
+ local-hostname: cloud-vm
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+
+#. Create the userdata file ``userdata.yaml``\ :
+
+ .. code-block:: yaml
+
+ #cloud-config
+
+ users:
+ - default
+ - name: akutz
+ primary_group: akutz
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ groups: sudo, wheel
+ ssh_import_id: None
+ lock_passwd: true
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDE0c5FczvcGSh/tG4iw+Fhfi/O5/EvUM/96js65tly4++YTXK1d9jcznPS5ruDlbIZ30oveCBd3kT8LLVFwzh6hepYTf0YmCTpF4eDunyqmpCXDvVscQYRXyasEm5olGmVe05RrCJSeSShAeptv4ueIn40kZKOghinGWLDSZG4+FFfgrmcMCpx5YSCtX2gvnEYZJr0czt4rxOZuuP7PkJKgC/mt2PcPjooeX00vAj81jjU2f3XKrjjz2u2+KIt9eba+vOQ6HiC8c2IzRkUAJ5i1atLy8RIbejo23+0P4N2jjk17QySFOVHwPBDTYb0/0M/4ideeU74EN/CgVsvO6JrLsPBR4dojkV5qNbMNxIVv5cUwIy2ThlLgqpNCeFIDLCWNZEFKlEuNeSQ2mPtIO7ETxEL2Cz5y/7AIuildzYMc6wi2bofRC8HmQ7rMXRWdwLKWsR0L7SKjHblIwarxOGqLnUI+k2E71YoP7SZSlxaKi17pqkr0OMCF+kKqvcvHAQuwGqyumTEWOlH6TCx1dSPrW+pVCZSHSJtSTfDW2uzL6y8k10MT06+pVunSrWo5LHAXcS91htHV1M1UrH/tZKSpjYtjMb5+RonfhaFRNzvj7cCE1f3Kp8UVqAdcGBTtReoE8eRUT63qIxjw03a7VwAyB2w+9cu1R9/vAo8SBeRqw== sakutz@gmail.com
+
+#. Please note this step requires that the VM be powered off. All of the
+ commands below use the VMware CLI tool, `govc <https://github.com/vmware/govmomi/blob/master/govc>`_.
+
+ Go ahead and assign the path to the VM to the environment variable ``VM``\ :
+
+ .. code-block:: shell
+
+ export VM="/inventory/path/to/the/vm"
+
+#. Power off the VM:
+
+ .. raw:: html
+
+ <hr />
+
+ &#x26a0;&#xfe0f; <strong>First Boot Mode</strong>
+
+ To ensure the next power-on operation results in a first-boot scenario for
+ cloud-init, it may be necessary to run the following command just before
+ powering off the VM:
+
+ .. code-block:: bash
+
+ cloud-init clean
+
+ Otherwise cloud-init may not run in first-boot mode. For more information
+ on how the boot mode is determined, please see the
+ `First Boot Documentation <../boot.html#first-boot-determination>`_.
+
+ .. raw:: html
+
+ <hr />
+
+ .. code-block:: shell
+
+ govc vm.power -off "${VM}"
+
+#.
+ Export the environment variables that contain the cloud-init metadata and
+ userdata:
+
+ .. code-block:: shell
+
+ export METADATA=$(gzip -c9 <metadata.yaml | { base64 -w0 2>/dev/null || base64; }) \
+ USERDATA=$(gzip -c9 <userdata.yaml | { base64 -w0 2>/dev/null || base64; })
+
+#.
+ Assign the metadata and userdata to the VM:
+
+ .. code-block:: shell
+
+ govc vm.change -vm "${VM}" \
+ -e guestinfo.metadata="${METADATA}" \
+ -e guestinfo.metadata.encoding="gzip+base64" \
+ -e guestinfo.userdata="${USERDATA}" \
+ -e guestinfo.userdata.encoding="gzip+base64"
+
+ Please note the above commands include specifying the encoding for the
+ properties. This is important as it informs the datasource how to decode
+ the data for cloud-init. Valid values for ``metadata.encoding`` and
+ ``userdata.encoding`` include:
+
+
+ * ``base64``
+ * ``gzip+base64``
+
+#.
+ Power on the VM:
+
+ .. code-block:: shell
+
+ govc vm.power -vm "${VM}" -on
+
+If all went according to plan, the CentOS box is:
+
+* Locked down, allowing SSH access only for the user in the userdata
+* Configured for a dynamic IP address via DHCP
+* Assigned the hostname ``cloud-vm``
+
+Examples
+--------
+
+This section reviews common configurations:
+
+Setting the hostname
+^^^^^^^^^^^^^^^^^^^^
+
+The hostname is set by way of the metadata key ``local-hostname``.
+
+Setting the instance ID
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The instance ID may be set by way of the metadata key ``instance-id``. However,
+if this value is absent then the instance ID is read from the file
+``/sys/class/dmi/id/product_uuid``.
+
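+As an illustrative (not authoritative) sketch of that fallback:
+
+.. code-block:: python
+
+   def get_instance_id(metadata: dict) -> str:
+       # Prefer a metadata-provided instance-id; otherwise fall back to
+       # the host's DMI product UUID.
+       if metadata.get("instance-id"):
+           return metadata["instance-id"]
+       with open("/sys/class/dmi/id/product_uuid") as f:
+           return f.read().strip()
+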
+Providing public SSH keys
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The public SSH keys may be set by way of the metadata key ``public-keys-data``.
+Each newline-terminated string will be interpreted as a separate SSH public
+key, which will be placed in the distro's default user's
+``~/.ssh/authorized_keys``. If the value is empty or absent, then nothing will
+be written to ``~/.ssh/authorized_keys``.
+
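+A rough sketch of that interpretation:
+
+.. code-block:: python
+
+   from typing import List
+
+   def parse_public_keys(public_keys_data: str) -> List[str]:
+       # Each newline-terminated entry is a separate public key; an empty
+       # or absent value yields no keys at all.
+       return [
+           line for line in public_keys_data.splitlines() if line.strip()
+       ]
+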
+Configuring the network
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The network is configured by setting the metadata key ``network`` with a value
+consistent with Network Config Versions
+`1 <../network-config-format-v1.html>`_ or
+`2 <../network-config-format-v2.html>`_\ , depending on the Linux
+distro's version of cloud-init.
+
+The metadata key ``network.encoding`` may be used to indicate the format of
+the metadata key "network". Valid encodings are ``base64`` and ``gzip+base64``.
diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst
index fb3006fe..b897a318 100644
--- a/doc/rtd/topics/debugging.rst
+++ b/doc/rtd/topics/debugging.rst
@@ -258,9 +258,9 @@ from **-proposed**
* Create a `new cloud-init bug`_ reporting the version of cloud-init
affected
- * Ping upstream cloud-init on Freenode's `#cloud-init IRC channel`_
+ * Ping upstream cloud-init on Libera's `#cloud-init IRC channel`_
.. _SRU: https://wiki.ubuntu.com/StableReleaseUpdates
.. _CloudinitUpdates: https://wiki.ubuntu.com/CloudinitUpdates
.. _new cloud-init bug: https://bugs.launchpad.net/cloud-init/+filebug
-.. _#cloud-init IRC channel: https://webchat.freenode.net/?channel=#cloud-init
+.. _#cloud-init IRC channel: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init
diff --git a/doc/rtd/topics/events.rst b/doc/rtd/topics/events.rst
new file mode 100644
index 00000000..57797bd9
--- /dev/null
+++ b/doc/rtd/topics/events.rst
@@ -0,0 +1,95 @@
+.. _events:
+
+******************
+Events and Updates
+******************
+
+Events
+======
+
+`Cloud-init`_ will fetch and apply cloud and user data configuration
+upon several event types. The two most common events for cloud-init
+are when an instance first boots and any subsequent boot thereafter (reboot).
+In addition to boot events, cloud-init users and vendors are interested
+in when devices are added. cloud-init currently supports the following
+event types:
+
+- **BOOT_NEW_INSTANCE**: New instance first boot
+- **BOOT**: Any system boot other than 'BOOT_NEW_INSTANCE'
+- **BOOT_LEGACY**: Similar to 'BOOT', but applies networking config twice each
+ boot: once during the Local stage, then again in the Network stage. As this
+ was previously the default behavior, this option exists to prevent
+ regressions for setups that rely on it.
+- **HOTPLUG**: Dynamic add of a system device
+
+Future work will likely include infrastructure and support for the following
+events:
+
+- **METADATA_CHANGE**: An instance's metadata has changed
+- **USER_REQUEST**: Directed request to update
+
+Datasource Event Support
+========================
+
+All :ref:`datasources` by default support the ``BOOT_NEW_INSTANCE`` event.
+Each Datasource will declare a set of these events that it is capable of
+handling. Datasources may not support all event types. In some cases a system
+may be configured to allow a particular event but may be running on
+a platform whose datasource cannot support the event.
+
+Configuring Event Updates
+=========================
+
+Update configuration may be specified via user data,
+which can be used to enable or disable handling of specific events.
+This configuration will be honored as long as the events are supported by
+the datasource. However, configuration will always be applied at first
+boot, regardless of the user data specified.
+
+Updates
+~~~~~~~
+Update policy configuration defines which
+events are allowed to be handled. This is separate from whether a
+particular platform or datasource has the capability for such events.
+
+**scope**: *<name of the scope for event policy>*
+
+The ``scope`` value is a string that defines the domain in which the event
+occurs. Currently the only known scope is ``network``, though more
+scopes may be added in the future. Scopes are defined by convention but
+arbitrary values can be used.
+
+**when**: *<list of events to handle for a particular scope>*
+
+Each ``scope`` requires a ``when`` element to specify which events
+are allowed to be handled.
+
+Hotplug
+=======
+When the hotplug event is supported by the data source and configured in
+user data, cloud-init will respond to the addition or removal of network
+interfaces on the system. In addition to fetching and updating the system
+metadata, cloud-init will also bring up/down the newly added interface.
+
+.. warning:: Due to its use of systemd sockets, hotplug functionality
+ is currently incompatible with SELinux. This issue is being tracked
+ `on Launchpad`_. Additionally, hotplug support is considered experimental for
+ non-Debian based systems.
+
+Examples
+========
+
+apply network config every boot
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+On every boot, apply network configuration found in the datasource.
+
+.. code-block:: yaml
+
+ # apply network config on every boot
+ updates:
+ network:
+ when: ['boot']
+
+.. _Cloud-init: https://launchpad.net/cloud-init
+.. _on Launchpad: https://bugs.launchpad.net/cloud-init/+bug/1936229
+.. vi: textwidth=78
diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst
index 27fabf15..efc532de 100644
--- a/doc/rtd/topics/faq.rst
+++ b/doc/rtd/topics/faq.rst
@@ -10,7 +10,7 @@ Having trouble? We would like to help!
- First go through this page with answers to common questions
- Use the search bar at the upper left to search these docs
-- Ask a question in the ``#cloud-init`` IRC channel on Freenode
+- Ask a question in the ``#cloud-init`` IRC channel on Libera
- Join and ask questions on the `cloud-init mailing list <https://launchpad.net/~cloud-init>`_
- Find a bug? Check out the :ref:`reporting_bugs` topic for
how to report one
@@ -139,7 +139,7 @@ cloud-config is:
To verify your YAML, we do have a short script called `validate-yaml.py`_
that can validate your user data offline.
-.. _validate-yaml.py: https://github.com/canonical/cloud-init/blob/master/tools/validate-yaml.py
+.. _validate-yaml.py: https://github.com/canonical/cloud-init/blob/main/tools/validate-yaml.py
Another option is to run the following on an instance to debug userdata
provided to the system:
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
index fa8aa925..05580804 100644
--- a/doc/rtd/topics/format.rst
+++ b/doc/rtd/topics/format.rst
@@ -55,7 +55,7 @@ to store the multipart message in ``user-data``:
$ cloud-init devel make-mime -a config.yaml:cloud-config -a script.sh:x-shellscript > user-data
-.. _make-mime: https://github.com/canonical/cloud-init/blob/master/cloudinit/cmd/devel/make_mime.py
+.. _make-mime: https://github.com/canonical/cloud-init/blob/main/cloudinit/cmd/devel/make_mime.py
User-Data Script
diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst
index 1850982c..6c17139f 100644
--- a/doc/rtd/topics/instancedata.rst
+++ b/doc/rtd/topics/instancedata.rst
@@ -509,14 +509,19 @@ EC2 instance:
Using instance-data
===================
-As of cloud-init v. 18.4, any variables present in
-``/run/cloud-init/instance-data.json`` can be used in:
+As of cloud-init v. 18.4, any instance-data can be used in:
* User-data scripts
* Cloud config data
* Command line interface via **cloud-init query** or
**cloud-init devel render**
+This means that any variable present in
+``/run/cloud-init/instance-data-sensitive.json`` can be used,
+unless a non-root user is using the command line interface.
+In the non-root user case,
+``/run/cloud-init/instance-data.json`` will be used instead.
+
Many clouds allow users to provide user-data to an instance at
the time the instance is launched. Cloud-init supports a number of
:ref:`user_data_formats`.
@@ -559,9 +564,39 @@ Below are some examples of providing these types of user-data:
{%- endif %}
...
+One way to easily explore what Jinja variables are available on your machine
+is to use the ``cloud-init query --format`` (-f) commandline option which will
+render any Jinja syntax you use. Warnings or exceptions will be raised for
+invalid instance-data keys, paths, or Jinja syntax.
+
+.. code-block:: shell-session
+
+ # List all instance-data keys and values as root user
+ % sudo cloud-init query --all
+ {...}
+
+ # Introspect nested keys on an object
+ % cloud-init query -f "{{ds.keys()}}"
+ dict_keys(['meta_data', '_doc'])
+
+ # Test your Jinja rendering syntax on the command-line directly
+
+ # Failure to reference valid top-level instance-data key
+ % cloud-init query -f "{{invalid.instance-data.key}}"
+ WARNING: Ignoring jinja template for query commandline: 'invalid' is undefined
+
+ # Failure to reference valid dot-delimited key path on a known top-level key
+ % cloud-init query -f "{{v1.not_here}}"
+ WARNING: Could not render jinja template variables in file 'query commandline': 'not_here'
+ CI_MISSING_JINJA_VAR/not_here
+
+ # Test expected value using valid instance-data key path
+ % cloud-init query -f "My AMI: {{ds.meta_data.ami_id}}"
+ My AMI: ami-0fecc35d3c8ba8d60
+
.. note::
Trying to reference jinja variables that don't exist in
- instance-data.json will result in warnings in ``/var/log/cloud-init.log``
+ instance-data will result in warnings in ``/var/log/cloud-init.log``
and the following string in your rendered user-data:
``CI_MISSING_JINJA_VAR/<your_varname>``.
diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst
index 17732c2a..3202163b 100644
--- a/doc/rtd/topics/network-config-format-v1.rst
+++ b/doc/rtd/topics/network-config-format-v1.rst
@@ -335,6 +335,10 @@ the following keys:
- ``address``: List of IPv4 or IPv6 address of nameservers.
- ``search``: List of hostnames to include in the resolv.conf search path.
+- ``interface``: Optional. Ties the nameserver definition to the specified
+ interface. The value specified here must match the `name` of an interface
+ defined in this config. If unspecified, this nameserver will be considered
+ a global nameserver.
**Nameserver Example**::
@@ -349,6 +353,7 @@ the following keys:
address: 192.168.23.14/27
gateway: 192.168.23.1
- type: nameserver
+ interface: interface0 # Ties nameserver to interface0 only
address:
- 192.168.23.2
- 8.8.8.8
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
index 5f7a74f8..8eb7a31b 100644
--- a/doc/rtd/topics/network-config.rst
+++ b/doc/rtd/topics/network-config.rst
@@ -104,6 +104,13 @@ interface given the information it has available.
Finally after selecting the "right" interface, a configuration is
generated and applied to the system.
+.. note::
+
+ PhotonOS disables fallback networking configuration by default, leaving the
+ network unrendered when no other network config is provided.
+ If fallback config is still desired on PhotonOS, it can be enabled by
+ providing `disable_fallback_netcfg: false` in
+ `/etc/cloud/cloud.cfg:sys_config` settings.
Network Configuration Sources
=============================
diff --git a/doc/rtd/topics/testing.rst b/doc/rtd/topics/testing.rst
index 5b702bd2..d882e036 100644
--- a/doc/rtd/topics/testing.rst
+++ b/doc/rtd/topics/testing.rst
@@ -166,8 +166,8 @@ Test Argument Ordering
.. _pytest: https://docs.pytest.org/
.. _pytest fixtures: https://docs.pytest.org/en/latest/fixture.html
.. _TestGetPackageMirrorInfo: https://github.com/canonical/cloud-init/blob/42f69f410ab8850c02b1f53dd67c132aa8ef64f5/cloudinit/distros/tests/test_init.py\#L15
-.. _TestPrependBaseCommands: https://github.com/canonical/cloud-init/blob/master/cloudinit/tests/test_subp.py#L9
+.. _TestPrependBaseCommands: https://github.com/canonical/cloud-init/blob/fbcb224bc12495ba200ab107246349d802c5d8e6/cloudinit/tests/test_subp.py#L20
.. _assertion introspection: https://docs.pytest.org/en/latest/assert.html
.. _pytest 3.0: https://docs.pytest.org/en/latest/changelog.html#id1093
-.. _pytest.param: https://docs.pytest.org/en/latest/reference.html#pytest-param
+.. _pytest.param: https://docs.pytest.org/en/6.2.x/reference.html#pytest-param
.. _autospecced: https://docs.python.org/3.8/library/unittest.mock.html#autospeccing
diff --git a/doc/sources/ovf/example/ovf-env.xml b/doc/sources/ovf/example/ovf-env.xml
index 13e8f104..4ef4ee63 100644
--- a/doc/sources/ovf/example/ovf-env.xml
+++ b/doc/sources/ovf/example/ovf-env.xml
@@ -41,6 +41,14 @@
-->
<Property oe:key="user-data" oe:value="IyEvYmluL3NoCmVjaG8gImhpIHdvcmxkIgo="/>
<Property oe:key="password" oe:value="passw0rd"/>
+ <!--
+ network-config is optional; it can only be read from VMware guestinfo.ovfEnv.
+ The value for network-config must be base64 encoded.
+ It will be decoded, and then processed normally as network-config.
+ Set ovf-env.xml as VMware guestinfo.ovfEnv with the command below:
+ 'vmware-rpctool "info-set guestinfo.ovfEnv `cat ./ovf-env.xml`"'
+ -->
+ <Property oe:key="network-config" oe:value="bmV0d29yazoKICB2ZXJzaW9uOiAyCiAgZXRoZXJuZXRzOgogICAgbmljczoKICAgICAgbWF0Y2g6CiAgICAgICAgbmFtZTogZXRoKgogICAgICBkaGNwNDogeWVz"/>
</PropertySection>
</Environment>
diff --git a/integration-requirements.txt b/integration-requirements.txt
index 95891356..20940328 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -1,5 +1,5 @@
# PyPI requirements for cloud-init integration testing
# https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html
#
-pycloudlib @ git+https://github.com/canonical/pycloudlib.git@96b146ee1beb99b8e44e36525e18a9a20e00c3f2
+pycloudlib @ git+https://github.com/canonical/pycloudlib.git@245ca0b97e71926fdb651147e42d6256b17f6778
pytest
diff --git a/packages/pkg-deps.json b/packages/pkg-deps.json
index 80028396..eaf13469 100644
--- a/packages/pkg-deps.json
+++ b/packages/pkg-deps.json
@@ -27,6 +27,20 @@
"sudo"
]
},
+ "eurolinux" : {
+ "build-requires" : [
+ "python3-devel"
+ ],
+ "requires" : [
+ "e2fsprogs",
+ "iproute",
+ "net-tools",
+ "procps",
+ "rsyslog",
+ "shadow-utils",
+ "sudo"
+ ]
+ },
"redhat" : {
"build-requires" : [
"python3-devel"
diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in
index 16138012..b930709b 100644
--- a/packages/redhat/cloud-init.spec.in
+++ b/packages/redhat/cloud-init.spec.in
@@ -119,6 +119,12 @@ version_pys=$(cd "$RPM_BUILD_ROOT" && find . -name version.py -type f)
( cd "$RPM_BUILD_ROOT" &&
sed -i "s,@@PACKAGED_VERSION@@,%{version}-%{release}," $version_pys )
+# patch hotplug /usr/libexec script path
+hotplug_file=$(cd "$RPM_BUILD_ROOT" && find . -name 10-cloud-init-hook-hotplug.rules -type f)
+
+( cd "$RPM_BUILD_ROOT" &&
+ sed -i "s,/usr/lib,%{_libexecdir}," $hotplug_file )
+
%clean
rm -rf $RPM_BUILD_ROOT
@@ -172,6 +178,7 @@ fi
%files
/lib/udev/rules.d/66-azure-ephemeral.rules
+/lib/udev/rules.d/10-cloud-init-hook-hotplug.rules
%if "%{init_system}" == "systemd"
/usr/lib/systemd/system-generators/cloud-init-generator
diff --git a/requirements.txt b/requirements.txt
index 5817da3b..c4adc455 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -32,3 +32,12 @@ jsonpatch
# For validating cloud-config sections per schema definitions
jsonschema
+
+# Used by DataSourceVMware to inspect the host's network configuration during
+# the "setup()" function.
+#
+# This allows a host that uses DHCP to bring up the network during BootLocal
+# and still participate in instance-data by gathering the network details at
+# runtime, merging that information into the metadata, and persisting it back
+# to disk.
+netifaces>=0.10.4
diff --git a/setup.py b/setup.py
index cbacf48e..7fa03e63 100755
--- a/setup.py
+++ b/setup.py
@@ -128,6 +128,7 @@ INITSYS_FILES = {
'systemd': [render_tmpl(f)
for f in (glob('systemd/*.tmpl') +
glob('systemd/*.service') +
+ glob('systemd/*.socket') +
glob('systemd/*.target'))
if (is_f(f) and not is_generator(f))],
'systemd.generators': [
@@ -156,7 +157,7 @@ USR = "usr"
ETC = "etc"
USR_LIB_EXEC = "usr/lib"
LIB = "lib"
-if os.uname()[0] == 'FreeBSD':
+if os.uname()[0] in ['FreeBSD', 'DragonFly']:
USR = "usr/local"
USR_LIB_EXEC = "usr/local/lib"
elif os.path.isfile('/etc/redhat-release'):
@@ -249,6 +250,7 @@ data_files = [
(ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')),
(ETC + '/cloud/templates', glob('templates/*')),
(USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify',
+ 'tools/hook-hotplug',
'tools/uncloud-init',
'tools/write-ssh-key-fingerprints']),
(USR + '/share/bash-completion/completions',
diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl
index 9b103ef9..3dbe5947 100755..100644
--- a/systemd/cloud-init-generator.tmpl
+++ b/systemd/cloud-init-generator.tmpl
@@ -83,7 +83,8 @@ default() {
check_for_datasource() {
local ds_rc=""
-{% if variant in ["almalinux", "rhel", "fedora", "centos"] %}
+{% if variant in ["almalinux", "centos", "eurolinux", "fedora", "rhel",
+ "rocky", "virtuozzo"] %}
local dsidentify="/usr/libexec/cloud-init/ds-identify"
{% else %}
local dsidentify="/usr/lib/cloud-init/ds-identify"
diff --git a/systemd/cloud-init-hotplugd.service b/systemd/cloud-init-hotplugd.service
new file mode 100644
index 00000000..b64632ef
--- /dev/null
+++ b/systemd/cloud-init-hotplugd.service
@@ -0,0 +1,22 @@
+# Paired with cloud-init-hotplugd.socket to read from the FIFO
+# /run/cloud-init/hook-hotplug-cmd which is created during a udev network
+# add or remove event as processed by 10-cloud-init-hook-hotplug.rules.
+
+# On start, read args from the FIFO, process and provide structured arguments
+# to `cloud-init devel hotplug-hook` which will setup or teardown network
+# devices as configured by user-data.
+
+# Known bug with an enforcing SELinux policy: LP: #1936229
+# cloud-init-hotplugd.service will read args from file descriptor 3
+
+[Unit]
+Description=cloud-init hotplug hook daemon
+After=cloud-init-hotplugd.socket
+
+[Service]
+Type=simple
+ExecStart=/bin/bash -c 'read args <&3; echo "args=$args"; \
+ exec /usr/bin/cloud-init devel hotplug-hook $args; \
+ exit 0'
+SyslogIdentifier=cloud-init-hotplugd
+TimeoutStopSec=5
diff --git a/systemd/cloud-init-hotplugd.socket b/systemd/cloud-init-hotplugd.socket
new file mode 100644
index 00000000..aa093016
--- /dev/null
+++ b/systemd/cloud-init-hotplugd.socket
@@ -0,0 +1,13 @@
+# cloud-init-hotplugd.socket listens on the FIFO file
+# /run/cloud-init/hook-hotplug-cmd which is created during a udev network
+# add or remove event as processed by 10-cloud-init-hook-hotplug.rules.
+
+# Known bug with an enforcing SELinux policy: LP: #1936229
+[Unit]
+Description=cloud-init hotplug hook socket
+
+[Socket]
+ListenFIFO=/run/cloud-init/hook-hotplug-cmd
+
+[Install]
+WantedBy=cloud-init.target
diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl
index a5c51277..636f59be 100644
--- a/systemd/cloud-init.service.tmpl
+++ b/systemd/cloud-init.service.tmpl
@@ -1,7 +1,9 @@
## template:jinja
[Unit]
Description=Initial cloud-init job (metadata service crawler)
+{% if variant not in ["photon"] %}
DefaultDependencies=no
+{% endif %}
Wants=cloud-init-local.service
Wants=sshd-keygen.service
Wants=sshd.service
@@ -10,7 +12,8 @@ After=systemd-networkd-wait-online.service
{% if variant in ["ubuntu", "unknown", "debian"] %}
After=networking.service
{% endif %}
-{% if variant in ["almalinux", "centos", "fedora", "rhel"] %}
+{% if variant in ["almalinux", "centos", "eurolinux", "fedora", "rhel",
+ "rocky", "virtuozzo"] %}
After=network.service
After=NetworkManager.service
{% endif %}
diff --git a/sysvinit/freebsd/cloudinit b/sysvinit/freebsd/cloudinit
index aa5bd118..d26f3d0f 100755
--- a/sysvinit/freebsd/cloudinit
+++ b/sysvinit/freebsd/cloudinit
@@ -2,7 +2,7 @@
# PROVIDE: cloudinit
# REQUIRE: FILESYSTEMS NETWORKING cloudinitlocal ldconfig devd
-# BEFORE: cloudconfig cloudfinal
+# BEFORE: LOGIN cloudconfig cloudfinal
. /etc/rc.subr
diff --git a/templates/chrony.conf.photon.tmpl b/templates/chrony.conf.photon.tmpl
new file mode 100644
index 00000000..8551f793
--- /dev/null
+++ b/templates/chrony.conf.photon.tmpl
@@ -0,0 +1,48 @@
+## template:jinja
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Record the rate at which the system clock gains/loses time.
+driftfile /var/lib/chrony/drift
+
+# Allow the system clock to be stepped in the first three updates
+# if its offset is larger than 1 second.
+makestep 1.0 3
+
+# Enable kernel synchronization of the real-time clock (RTC).
+rtcsync
+
+# Enable hardware timestamping on all interfaces that support it.
+#hwtimestamp *
+
+# Increase the minimum number of selectable sources required to adjust
+# the system clock.
+#minsources 2
+
+# Allow NTP client access from local network.
+#allow 192.168.0.0/16
+
+# Serve time even if not synchronized to a time source.
+#local stratum 10
+
+# Specify file containing keys for NTP authentication.
+#keyfile /etc/chrony.keys
+
+# Get TAI-UTC offset and leap seconds from the system tz database.
+leapsectz right/UTC
+
+# Specify directory for log files.
+logdir /var/log/chrony
+
+# Select which information is logged.
+#log measurements statistics tracking
diff --git a/templates/hosts.photon.tmpl b/templates/hosts.photon.tmpl
new file mode 100644
index 00000000..0fd6f722
--- /dev/null
+++ b/templates/hosts.photon.tmpl
@@ -0,0 +1,22 @@
+## template:jinja
+{#
+This file /etc/cloud/templates/hosts.photon.tmpl is only utilized
+if enabled in cloud-config. Specifically, in order to enable it
+you need to add the following to config:
+ manage_etc_hosts: True
+-#}
+# Your system has configured 'manage_etc_hosts' as True.
+# As a result, if you wish for changes to this file to persist
+# then you will need to either
+# a.) make changes to the master file in /etc/cloud/templates/hosts.photon.tmpl
+# b.) change or remove the value of 'manage_etc_hosts' in
+# /etc/cloud/cloud.cfg or cloud-config from user-data
+#
+# The following lines are desirable for IPv4 capable hosts
+127.0.0.1 {{fqdn}} {{hostname}}
+127.0.0.1 localhost.localdomain localhost
+127.0.0.1 localhost4.localdomain4 localhost4
+
+# The following lines are desirable for IPv6 capable hosts
+::1 {{fqdn}} {{hostname}}
+::1 localhost6.localdomain6 localhost6
diff --git a/templates/ntp.conf.photon.tmpl b/templates/ntp.conf.photon.tmpl
new file mode 100644
index 00000000..4d4910d1
--- /dev/null
+++ b/templates/ntp.conf.photon.tmpl
@@ -0,0 +1,61 @@
+## template:jinja
+
+# For more information about this file, see the man pages
+# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5).
+
+driftfile /var/lib/ntp/drift
+
+# Permit time synchronization with our time source, but do not
+# permit the source to query or modify the service on this system.
+restrict default kod nomodify notrap nopeer noquery
+restrict -6 default kod nomodify notrap nopeer noquery
+
+# Permit all access over the loopback interface. This could
+# be tightened as well, but to do so would affect some of
+# the administrative functions.
+restrict 127.0.0.1
+restrict -6 ::1
+
+# Hosts on local network are less restricted.
+#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
+
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+#broadcast 192.168.1.255 autokey # broadcast server
+#broadcastclient # broadcast client
+#broadcast 224.0.1.1 autokey # multicast server
+#multicastclient 224.0.1.1 # multicast client
+#manycastserver 239.255.254.254 # manycast server
+#manycastclient 239.255.254.254 autokey # manycast client
+
+# Enable public key cryptography.
+#crypto
+
+includefile /etc/ntp/crypto/pw
+
+# Key file containing the keys and key identifiers used when operating
+# with symmetric key cryptography.
+keys /etc/ntp/keys
+
+# Specify the key identifiers which are trusted.
+#trustedkey 4 8 42
+
+# Specify the key identifier to use with the ntpdc utility.
+#requestkey 8
+
+# Specify the key identifier to use with the ntpq utility.
+#controlkey 8
+
+# Enable writing of statistics records.
+#statistics clockstats cryptostats loopstats peerstats
diff --git a/templates/resolv.conf.tmpl b/templates/resolv.conf.tmpl
index f870be67..72a37bf7 100644
--- a/templates/resolv.conf.tmpl
+++ b/templates/resolv.conf.tmpl
@@ -22,7 +22,7 @@ domain {{domain}}
sortlist {% for sort in sortlist %}{{sort}} {% endfor %}
{% endif %}
{#
- Flags and options are required to be on the
+ Flags and options are required to be on the
same line preceded by "options" keyword
#}
{% if options or flags %}
diff --git a/templates/systemd.resolved.conf.tmpl b/templates/systemd.resolved.conf.tmpl
new file mode 100644
index 00000000..fca50d37
--- /dev/null
+++ b/templates/systemd.resolved.conf.tmpl
@@ -0,0 +1,15 @@
+## template:jinja
+# Your system has been configured with 'manage-resolv-conf' set to true.
+# As a result, cloud-init has written this file with configuration data
+# that it has been provided. Cloud-init, by default, will write this file
+# a single time (PER_ONCE).
+#
+[Resolve]
+LLMNR=false
+{% if nameservers is defined %}
+DNS={% for server in nameservers %}{{server}} {% endfor %}
+{% endif %}
+
+{% if searchdomains is defined %}
+Domains={% for search in searchdomains %}{{search}} {% endfor %}
+{% endif %}
diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml
index 6249efc5..c52b78f9 100644
--- a/tests/cloud_tests/releases.yaml
+++ b/tests/cloud_tests/releases.yaml
@@ -133,6 +133,23 @@ features:
releases:
# UBUNTU =================================================================
+ impish:
+ # EOL: July 2022
+ default:
+ enabled: true
+ release: impish
+ version: "21.10"
+ os: ubuntu
+ feature_groups:
+ - base
+ - debian_base
+ - ubuntu_specific
+ lxd:
+ sstreams_server: https://cloud-images.ubuntu.com/daily
+ alias: impish
+ setup_overrides: null
+ override_templates: false
+
hirsute:
# EOL: Jan 2022
default:
diff --git a/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml b/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml
index e366c042..cdb1c28d 100644
--- a/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml
+++ b/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml
@@ -14,14 +14,14 @@ cloud_config: |
# For example the configuration below will have the following section
# added to puppet.conf:
# [puppetd]
- # server=puppetmaster.example.org
+ # server=puppetserver.example.org
# certname=i-0123456.ip-X-Y-Z.cloud.internal
#
# The puppetmaster ca certificate will be available in
# /var/lib/puppet/ssl/certs/ca.pem
conf:
agent:
- server: "puppetmaster.example.org"
+ server: "puppetserver.example.org"
# certname supports substitutions at runtime:
# %i: instanceid
# Example: i-0123456
@@ -31,11 +31,11 @@ cloud_config: |
# NB: the certname will automatically be lowercased as required by puppet
certname: "%i.%f"
# ca_cert is a special case. It won't be added to puppet.conf.
- # It holds the puppetmaster certificate in pem format.
+ # It holds the puppetserver certificate in pem format.
# It should be a multi-line string (using the | yaml notation for
# multi-line strings).
- # The puppetmaster certificate is located in
- # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host.
+ # The puppetserver certificate is located in
+ # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetserver host.
#
ca_cert: |
-----BEGIN CERTIFICATE-----
diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py
index 7dcccbdd..49baadb0 100644
--- a/tests/cloud_tests/util.py
+++ b/tests/cloud_tests/util.py
@@ -23,7 +23,7 @@ from tests.cloud_tests import LOG
OS_FAMILY_MAPPING = {
'debian': ['debian', 'ubuntu'],
- 'redhat': ['centos', 'rhel', 'fedora'],
+ 'redhat': ['centos', 'photon', 'rhel', 'fedora'],
'gentoo': ['gentoo'],
'freebsd': ['freebsd'],
'suse': ['sles'],
diff --git a/tests/integration_tests/assets/keys/id_rsa.test1 b/tests/integration_tests/assets/keys/id_rsa.test1
new file mode 100644
index 00000000..bd4c822e
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test1
@@ -0,0 +1,38 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAYEAtRlG96aJ23URvAgO/bBsuLl+lquc350aSwV98/i8vlvOn5GVcHye
+t/rXQg4lZ4s0owG3kWyQFY8nvTk+G+UNU8fN0anAzBDi+4MzsejkF9scjTMFmXVrIpICqV
+3bYQNjPv6r+ubQdkD01du3eB9t5/zl84gtshp0hBdofyz8u1/A25s7fVU67GyI7PdKvaS+
+yvJSInZnb2e9VQzfJC+qAnN7gUZatBKjdgUtJeiUUeDaVnaS17b0aoT9iBO0sIcQtOTBlY
+lCjFt1TAMLZ64Hj3SfGZB7Yj0Z+LzFB2IWX1zzsjI68YkYPKOSL/NYhQU9e55kJQ7WnngN
+HY/2n/A7dNKSFDmgM5c9IWgeZ7fjpsfIYAoJ/CAxFIND+PEHd1gCS6xoEhaUVyh5WH/Xkw
+Kv1nx4AiZ2BFCE+75kySRLZUJ+5y0r3DU5ktMXeURzVIP7pu0R8DCul+GU+M/+THyWtAEO
+geaNJ6fYpo2ipDhbmTYt3kk2lMIapRxGBFs+37sdAAAFgGGJssNhibLDAAAAB3NzaC1yc2
+EAAAGBALUZRvemidt1EbwIDv2wbLi5fparnN+dGksFffP4vL5bzp+RlXB8nrf610IOJWeL
+NKMBt5FskBWPJ705PhvlDVPHzdGpwMwQ4vuDM7Ho5BfbHI0zBZl1ayKSAqld22EDYz7+q/
+rm0HZA9NXbt3gfbef85fOILbIadIQXaH8s/LtfwNubO31VOuxsiOz3Sr2kvsryUiJ2Z29n
+vVUM3yQvqgJze4FGWrQSo3YFLSXolFHg2lZ2kte29GqE/YgTtLCHELTkwZWJQoxbdUwDC2
+euB490nxmQe2I9Gfi8xQdiFl9c87IyOvGJGDyjki/zWIUFPXueZCUO1p54DR2P9p/wO3TS
+khQ5oDOXPSFoHme346bHyGAKCfwgMRSDQ/jxB3dYAkusaBIWlFcoeVh/15MCr9Z8eAImdg
+RQhPu+ZMkkS2VCfuctK9w1OZLTF3lEc1SD+6btEfAwrpfhlPjP/kx8lrQBDoHmjSen2KaN
+oqQ4W5k2Ld5JNpTCGqUcRgRbPt+7HQAAAAMBAAEAAAGBAJJCTOd70AC2ptEGbR0EHHqADT
+Wgefy7A94tHFEqxTy0JscGq/uCGimaY7kMdbcPXT59B4VieWeAC2cuUPP0ZHQSfS5ke7oT
+tU3N47U+0uBVbNS4rUAH7bOo2o9wptnOA5x/z+O+AARRZ6tEXQOd1oSy4gByLf2Wkh2QTi
+vP6Hln1vlFgKEzcXg6G8fN3MYWxKRhWmZM3DLERMvorlqqSBLcs5VvfZfLKcsKWTExioAq
+KgwEjYm8T9+rcpsw1xBus3j9k7wCI1Sus6PCDjq0pcYKLMYM7p8ygnU2tRYrOztdIxgWRA
+w/1oenm1Mqq2tV5xJcBCwCLOGe6SFwkIRywOYc57j5McH98Xhhg9cViyyBdXy/baF0mro+
+qPhOsWDxqwD4VKZ9UmQ6O8kPNKcc7QcIpFJhcO0g9zbp/MT0KueaWYrTKs8y4lUkTT7Xz6
++MzlR122/JwlAbBo6Y2kWtB+y+XwBZ0BfyJsm2czDhKm7OI5KfuBNhq0tFfKwOlYBq4QAA
+AMAyvUof1R8LLISkdO3EFTKn5RGNkPPoBJmGs6LwvU7NSjjLj/wPQe4jsIBc585tvbrddp
+60h72HgkZ5tqOfdeBYOKqX0qQQBHUEvI6M+NeQTQRev8bCHMLXQ21vzpClnrwNzlja359E
+uTRfiPRwIlyPLhOUiClBDSAnBI9h82Hkk3zzsQ/xGfsPB7iOjRbW69bMRSVCRpeweCVmWC
+77DTsEOq69V2TdljhQNIXE5OcOWonIlfgPiI74cdd+dLhzc/AAAADBAO1/JXd2kYiRyNkZ
+aXTLcwiSgBQIYbobqVP3OEtTclr0P1JAvby3Y4cCaEhkenx+fBqgXAku5lKM+U1Q9AEsMk
+cjIhaDpb43rU7GPjMn4zHwgGsEKd5pC1yIQ2PlK+cHanAdsDjIg+6RR+fuvid/mBeBOYXb
+Py0sa3HyekLJmCdx4UEyNASoiNaGFLQVAqo+RACsXy6VMxFH5dqDYlvwrfUQLwxJmse9Vb
+GEuuPAsklNugZqssC2XOIujFVUpslduQAAAMEAwzVHQVtsc3icCSzEAARpDTUdTbI29OhB
+/FMBnjzS9/3SWfLuBOSm9heNCHs2jdGNb8cPdKZuY7S9Fx6KuVUPyTbSSYkjj0F4fTeC9g
+0ym4p4UWYdF67WSWwLORkaG8K0d+G/CXkz8hvKUg6gcZWKBHAE1ROrHu1nsc8v7mkiKq4I
+bnTw5Q9TgjbWcQWtgPq0wXyyl/K8S1SFdkMCTOHDD0RQ+jTV2WNGVwFTodIRHenX+Rw2g4
+CHbTWbsFrHR1qFAAAACmphbWVzQG5ld3Q=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/integration_tests/assets/keys/id_rsa.test1.pub b/tests/integration_tests/assets/keys/id_rsa.test1.pub
new file mode 100644
index 00000000..3d2e26e1
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test1.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC1GUb3ponbdRG8CA79sGy4uX6Wq5zfnRpLBX3z+Ly+W86fkZVwfJ63+tdCDiVnizSjAbeRbJAVjye9OT4b5Q1Tx83RqcDMEOL7gzOx6OQX2xyNMwWZdWsikgKpXdthA2M+/qv65tB2QPTV27d4H23n/OXziC2yGnSEF2h/LPy7X8Dbmzt9VTrsbIjs90q9pL7K8lIidmdvZ71VDN8kL6oCc3uBRlq0EqN2BS0l6JRR4NpWdpLXtvRqhP2IE7SwhxC05MGViUKMW3VMAwtnrgePdJ8ZkHtiPRn4vMUHYhZfXPOyMjrxiRg8o5Iv81iFBT17nmQlDtaeeA0dj/af8Dt00pIUOaAzlz0haB5nt+Omx8hgCgn8IDEUg0P48Qd3WAJLrGgSFpRXKHlYf9eTAq/WfHgCJnYEUIT7vmTJJEtlQn7nLSvcNTmS0xd5RHNUg/um7RHwMK6X4ZT4z/5MfJa0AQ6B5o0np9imjaKkOFuZNi3eSTaUwhqlHEYEWz7fux0= test1@host
diff --git a/tests/integration_tests/assets/keys/id_rsa.test2 b/tests/integration_tests/assets/keys/id_rsa.test2
new file mode 100644
index 00000000..5854d901
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test2
@@ -0,0 +1,38 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAYEAvK50D2PWOc4ikyHVRJS6tDhqzjL5cKiivID4p1X8BYCVw83XAEGO
+LnItUyVXHNADlh6fpVq1NY6A2JVtygoPF6ZFx8ph7IWMmnhDdnxLLyGsbhd1M1tiXJD/R+
+3WnGHRJ4PKrQavMLgqHRrieV3QVVfjFSeo6jX/4TruP6ZmvITMZWJrXaGphxJ/pPykEdkO
+i8AmKU9FNviojyPS2nNtj9B/635IdgWvrd7Vf5Ycsw9MR55LWSidwa856RH62Yl6LpEGTH
+m1lJiMk1u88JPSqvohhaUkLKkFpcQwcB0m76W1KOyllJsmX8bNXrlZsI+WiiYI7Xl5vQm2
+17DEuNeavtPAtDMxu8HmTg2UJ55Naxehbfe2lx2k5kYGGw3i1O1OVN2pZ2/OB71LucYd/5
+qxPaz03wswcGOJYGPkNc40vdES/Scc7Yt8HsnZuzqkyOgzn0HiUCzoYUYLYTpLf+yGmwxS
+yAEY056aOfkCsboKHOKiOmlJxNaZZFQkX1evep4DAAAFgC7HMbUuxzG1AAAAB3NzaC1yc2
+EAAAGBALyudA9j1jnOIpMh1USUurQ4as4y+XCooryA+KdV/AWAlcPN1wBBji5yLVMlVxzQ
+A5Yen6VatTWOgNiVbcoKDxemRcfKYeyFjJp4Q3Z8Sy8hrG4XdTNbYlyQ/0ft1pxh0SeDyq
+0GrzC4Kh0a4nld0FVX4xUnqOo1/+E67j+mZryEzGVia12hqYcSf6T8pBHZDovAJilPRTb4
+qI8j0tpzbY/Qf+t+SHYFr63e1X+WHLMPTEeeS1koncGvOekR+tmJei6RBkx5tZSYjJNbvP
+CT0qr6IYWlJCypBaXEMHAdJu+ltSjspZSbJl/GzV65WbCPloomCO15eb0JttewxLjXmr7T
+wLQzMbvB5k4NlCeeTWsXoW33tpcdpOZGBhsN4tTtTlTdqWdvzge9S7nGHf+asT2s9N8LMH
+BjiWBj5DXONL3REv0nHO2LfB7J2bs6pMjoM59B4lAs6GFGC2E6S3/shpsMUsgBGNOemjn5
+ArG6ChziojppScTWmWRUJF9Xr3qeAwAAAAMBAAEAAAGASj/kkEHbhbfmxzujL2/P4Sfqb+
+aDXqAeGkwujbs6h/fH99vC5ejmSMTJrVSeaUo6fxLiBDIj6UWA0rpLEBzRP59BCpRL4MXV
+RNxav/+9nniD4Hb+ug0WMhMlQmsH71ZW9lPYqCpfOq7ec8GmqdgPKeaCCEspH7HMVhfYtd
+eHylwAC02lrpz1l5/h900sS5G9NaWR3uPA+xbzThDs4uZVkSidjlCNt1QZhDSSk7jA5n34
+qJ5UTGu9WQDZqyxWKND+RIyQuFAPGQyoyCC1FayHO2sEhT5qHuumL14Mn81XpzoXFoKyql
+rhBDe+pHhKArBYt92Evch0k1ABKblFxtxLXcvk4Fs7pHi+8k4+Cnazej2kcsu1kURlMZJB
+w2QT/8BV4uImbH05LtyscQuwGzpIoxqrnHrvg5VbohStmhoOjYybzqqW3/M0qhkn5JgTiy
+dJcHRJisRnAcmbmEchYtLDi6RW1e022H4I9AFXQqyr5HylBq6ugtWcFCsrcX8ibZ8xAAAA
+wQCAOPgwae6yZLkrYzRfbxZtGKNmhpI0EtNSDCHYuQQapFZJe7EFENs/VAaIiiut0yajGj
+c3aoKcwGIoT8TUM8E3GSNW6+WidUOC7H6W+/6N2OYZHRBACGz820xO+UBCl2oSk+dLBlfr
+IQzBGUWn5uVYCs0/2nxfCdFyHtMK8dMF/ypbdG+o1rXz5y9b7PVG6Mn+o1Rjsdkq7VERmy
+Pukd8hwATOIJqoKl3TuFyBeYFLqe+0e7uTeswQFw17PF31VjAAAADBAOpJRQb8c6qWqsvv
+vkve0uMuL0DfWW0G6+SxjPLcV6aTWL5xu0Grd8uBxDkkHU/CDrAwpchXyuLsvbw21Eje/u
+U5k9nLEscWZwcX7odxlK+EfAY2Bf5+Hd9bH5HMzTRJH8KkWK1EppOLPyiDxz4LZGzPLVyv
+/1PgSuvXkSWk1KIE4SvSemyxGX2tPVI6uO+URqevfnPOS1tMB7BMQlgkR6eh4bugx9UYx9
+mwlXonNa4dN0iQxZ7N4rKFBbT/uyB2bQAAAMEAzisnkD8k9Tn8uyhxpWLHwb03X4ZUUHDV
+zu15e4a8dZ+mM8nHO986913Xz5JujlJKkGwFTvgWkIiR2zqTEauZHARH7gANpaweTm6lPd
+E4p2S0M3ulY7xtp9lCFIrDhMPPkGq8SFZB6qhgucHcZSRLq6ZDou3S2IdNOzDTpBtkhRCS
+0zFcdTLh3zZweoy8HGbW36bwB6s1CIL76Pd4F64i0Ms9CCCU6b+E5ArFhYQIsXiDbgHWbD
+tZRSm2GEgnDGAvAAAACmphbWVzQG5ld3Q=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/integration_tests/assets/keys/id_rsa.test2.pub b/tests/integration_tests/assets/keys/id_rsa.test2.pub
new file mode 100644
index 00000000..f3831a57
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test2.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC8rnQPY9Y5ziKTIdVElLq0OGrOMvlwqKK8gPinVfwFgJXDzdcAQY4uci1TJVcc0AOWHp+lWrU1joDYlW3KCg8XpkXHymHshYyaeEN2fEsvIaxuF3UzW2JckP9H7dacYdEng8qtBq8wuCodGuJ5XdBVV+MVJ6jqNf/hOu4/pma8hMxlYmtdoamHEn+k/KQR2Q6LwCYpT0U2+KiPI9Lac22P0H/rfkh2Ba+t3tV/lhyzD0xHnktZKJ3BrznpEfrZiXoukQZMebWUmIyTW7zwk9Kq+iGFpSQsqQWlxDBwHSbvpbUo7KWUmyZfxs1euVmwj5aKJgjteXm9CbbXsMS415q+08C0MzG7weZODZQnnk1rF6Ft97aXHaTmRgYbDeLU7U5U3alnb84HvUu5xh3/mrE9rPTfCzBwY4lgY+Q1zjS90RL9Jxzti3weydm7OqTI6DOfQeJQLOhhRgthOkt/7IabDFLIARjTnpo5+QKxugoc4qI6aUnE1plkVCRfV696ngM= test2@host
diff --git a/tests/integration_tests/assets/keys/id_rsa.test3 b/tests/integration_tests/assets/keys/id_rsa.test3
new file mode 100644
index 00000000..2596c762
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test3
@@ -0,0 +1,38 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAYEApPG4MdkYQKD57/qreFrh9GRC22y66qZOWZWRjC887rrbvBzO69hV
+yJpTIXleJEvpWiHYcjMR5G6NNFsnNtZ4fxDqmSc4vcFj53JsE/XNqLKq6psXadCb5vkNpG
+bxA+Z5bJlzJ969PgJIIEbgc86sei4kgR2MuPWqtZbY5GkpNCTqWuLYeFK+14oFruA2nyWH
+9MOIRDHK/d597psHy+LTMtymO7ZPhO571abKw6jvvwiSeDxVE9kV7KAQIuM9/S3gftvgQQ
+ron3GL34pgmIabdSGdbfHqGDooryJhlbquJZELBN236KgRNTCAjVvUzjjQr1eRP3xssGwV
+O6ECBGCQLl/aYogAgtwnwj9iXqtfiLK3EwlgjquU4+JQ0CVtLhG3gIZB+qoMThco0pmHTr
+jtfQCwrztsBBFunSa2/CstuV1mQ5O5ZrZ6ACo9yPRBNkns6+CiKdtMtCtzi3k2RDz9jpYm
+Pcak03Lr7IkdC1Tp6+jA+//yPHSO1o4CqW89IQzNAAAFgEUd7lZFHe5WAAAAB3NzaC1yc2
+EAAAGBAKTxuDHZGECg+e/6q3ha4fRkQttsuuqmTlmVkYwvPO6627wczuvYVciaUyF5XiRL
+6Voh2HIzEeRujTRbJzbWeH8Q6pknOL3BY+dybBP1zaiyquqbF2nQm+b5DaRm8QPmeWyZcy
+fevT4CSCBG4HPOrHouJIEdjLj1qrWW2ORpKTQk6lri2HhSvteKBa7gNp8lh/TDiEQxyv3e
+fe6bB8vi0zLcpju2T4Tue9WmysOo778Ikng8VRPZFeygECLjPf0t4H7b4EEK6J9xi9+KYJ
+iGm3UhnW3x6hg6KK8iYZW6riWRCwTdt+ioETUwgI1b1M440K9XkT98bLBsFTuhAgRgkC5f
+2mKIAILcJ8I/Yl6rX4iytxMJYI6rlOPiUNAlbS4Rt4CGQfqqDE4XKNKZh0647X0AsK87bA
+QRbp0mtvwrLbldZkOTuWa2egAqPcj0QTZJ7OvgoinbTLQrc4t5NkQ8/Y6WJj3GpNNy6+yJ
+HQtU6evowPv/8jx0jtaOAqlvPSEMzQAAAAMBAAEAAAGAGaqbdPZJNdVWzyb8g6/wtSzc0n
+Qq6dSTIJGLonq/So69HpqFAGIbhymsger24UMGvsXBfpO/1wH06w68HWZmPa+OMeLOi4iK
+WTuO4dQ/+l5DBlq32/lgKSLcIpb6LhcxEdsW9j9Mx1dnjc45owun/yMq/wRwH1/q/nLIsV
+JD3R9ZcGcYNDD8DWIm3D17gmw+qbG7hJES+0oh4n0xS2KyZpm7LFOEMDVEA8z+hE/HbryQ
+vjD1NC91n+qQWD1wKfN3WZDRwip3z1I5VHMpvXrA/spHpa9gzHK5qXNmZSz3/dfA1zHjCR
+2dHjJnrIUH8nyPfw8t+COC+sQBL3Nr0KUWEFPRM08cOcQm4ctzg17aDIZBONjlZGKlReR8
+1zfAw84Q70q2spLWLBLXSFblHkaOfijEbejIbaz2UUEQT27WD7RHAORdQlkx7eitk66T9d
+DzIq/cpYhm5Fs8KZsh3PLldp9nsHbD2Oa9J9LJyI4ryuIW0mVwRdvPSiiYi3K+mDCpAAAA
+wBe+ugEEJ+V7orb1f4Zez0Bd4FNkEc52WZL4CWbaCtM+ZBg5KnQ6xW14JdC8IS9cNi/I5P
+yLsBvG4bWPLGgQruuKY6oLueD6BFnKjqF6ACUCiSQldh4BAW1nYc2U48+FFvo3ZQyudFSy
+QEFlhHmcaNMDo0AIJY5Xnq2BG3nEX7AqdtZ8hhenHwLCRQJatDwSYBHDpSDdh9vpTnGp/2
+0jBz25Ko4UANzvSAc3sA4yN3jfpoM366TgdNf8x3g1v7yljQAAAMEA0HSQjzH5nhEwB58k
+mYYxnBYp1wb86zIuVhAyjZaeinvBQSTmLow8sXIHcCVuD3CgBezlU2SX5d9YuvRU9rcthi
+uzn4wWnbnzYy4SwzkMJXchUAkumFVD8Hq5TNPh2Z+033rLLE08EhYypSeVpuzdpFoStaS9
+3DUZA2bR/zLZI9MOVZRUcYImNegqIjOYHY8Sbj3/0QPV6+WpUJFMPvvedWhfaOsRMTA6nr
+VLG4pxkrieVl0UtuRGbzD/exXhXVi7AAAAwQDKkJj4ez/+KZFYlZQKiV0BrfUFcgS6ElFM
+2CZIEagCtu8eedrwkNqx2FUX33uxdvUTr4c9I3NvWeEEGTB9pgD4lh1x/nxfuhyGXtimFM
+GnznGV9oyz0DmKlKiKSEGwWf5G+/NiiCwwVJ7wsQQm7TqNtkQ9b8MhWWXC7xlXKUs7dmTa
+e8AqAndCCMEnbS1UQFO/R5PNcZXkFWDggLQ/eWRYKlrXgdnUgH6h0saOcViKpNJBUXb3+x
+eauhOY52PS/BcAAAAKamFtZXNAbmV3dAE=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/integration_tests/assets/keys/id_rsa.test3.pub b/tests/integration_tests/assets/keys/id_rsa.test3.pub
new file mode 100644
index 00000000..057db632
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test3.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCk8bgx2RhAoPnv+qt4WuH0ZELbbLrqpk5ZlZGMLzzuutu8HM7r2FXImlMheV4kS+laIdhyMxHkbo00Wyc21nh/EOqZJzi9wWPncmwT9c2osqrqmxdp0Jvm+Q2kZvED5nlsmXMn3r0+AkggRuBzzqx6LiSBHYy49aq1ltjkaSk0JOpa4th4Ur7XigWu4DafJYf0w4hEMcr93n3umwfL4tMy3KY7tk+E7nvVpsrDqO+/CJJ4PFUT2RXsoBAi4z39LeB+2+BBCuifcYvfimCYhpt1IZ1t8eoYOiivImGVuq4lkQsE3bfoqBE1MICNW9TOONCvV5E/fGywbBU7oQIEYJAuX9piiACC3CfCP2Jeq1+IsrcTCWCOq5Tj4lDQJW0uEbeAhkH6qgxOFyjSmYdOuO19ALCvO2wEEW6dJrb8Ky25XWZDk7lmtnoAKj3I9EE2Sezr4KIp20y0K3OLeTZEPP2OliY9xqTTcuvsiR0LVOnr6MD7//I8dI7WjgKpbz0hDM0= test3@host
diff --git a/tests/integration_tests/assets/test_version_change.pkl b/tests/integration_tests/assets/test_version_change.pkl
new file mode 100644
index 00000000..65ae93e5
--- /dev/null
+++ b/tests/integration_tests/assets/test_version_change.pkl
Binary files differ
diff --git a/tests/integration_tests/assets/trusty_with_mime.pkl b/tests/integration_tests/assets/trusty_with_mime.pkl
new file mode 100644
index 00000000..a4089ecf
--- /dev/null
+++ b/tests/integration_tests/assets/trusty_with_mime.pkl
@@ -0,0 +1,572 @@
+ccopy_reg
+_reconstructor
+p1
+(ccloudinit.sources.DataSourceNoCloud
+DataSourceNoCloudNet
+p2
+c__builtin__
+object
+p3
+NtRp4
+(dp5
+S'paths'
+p6
+g1
+(ccloudinit.helpers
+Paths
+p7
+g3
+NtRp8
+(dp9
+S'lookups'
+p10
+(dp11
+S'cloud_config'
+p12
+S'cloud-config.txt'
+p13
+sS'userdata'
+p14
+S'user-data.txt.i'
+p15
+sS'vendordata'
+p16
+S'vendor-data.txt.i'
+p17
+sS'userdata_raw'
+p18
+S'user-data.txt'
+p19
+sS'boothooks'
+p20
+g20
+sS'scripts'
+p21
+g21
+sS'sem'
+p22
+g22
+sS'data'
+p23
+g23
+sS'vendor_scripts'
+p24
+S'scripts/vendor'
+p25
+sS'handlers'
+p26
+g26
+sS'obj_pkl'
+p27
+S'obj.pkl'
+p28
+sS'vendordata_raw'
+p29
+S'vendor-data.txt'
+p30
+sS'vendor_cloud_config'
+p31
+S'vendor-cloud-config.txt'
+p32
+ssS'template_tpl'
+p33
+S'/etc/cloud/templates/%s.tmpl'
+p34
+sS'cfgs'
+p35
+(dp36
+S'cloud_dir'
+p37
+S'/var/lib/cloud/'
+p38
+sS'templates_dir'
+p39
+S'/etc/cloud/templates/'
+p40
+sS'upstart_dir'
+p41
+S'/etc/init/'
+p42
+ssS'cloud_dir'
+p43
+g38
+sS'datasource'
+p44
+NsS'upstart_conf_d'
+p45
+g42
+sS'boot_finished'
+p46
+S'/var/lib/cloud/instance/boot-finished'
+p47
+sS'instance_link'
+p48
+S'/var/lib/cloud/instance'
+p49
+sS'seed_dir'
+p50
+S'/var/lib/cloud/seed'
+p51
+sbsS'supported_seed_starts'
+p52
+(S'http://'
+p53
+S'https://'
+p54
+S'ftp://'
+p55
+tp56
+sS'sys_cfg'
+p57
+(dp58
+S'output'
+p59
+(dp60
+S'all'
+p61
+S'| tee -a /var/log/cloud-init-output.log'
+p62
+ssS'users'
+p63
+(lp64
+S'default'
+p65
+asS'def_log_file'
+p66
+S'/var/log/cloud-init.log'
+p67
+sS'cloud_final_modules'
+p68
+(lp69
+S'rightscale_userdata'
+p70
+aS'scripts-vendor'
+p71
+aS'scripts-per-once'
+p72
+aS'scripts-per-boot'
+p73
+aS'scripts-per-instance'
+p74
+aS'scripts-user'
+p75
+aS'ssh-authkey-fingerprints'
+p76
+aS'keys-to-console'
+p77
+aS'phone-home'
+p78
+aS'final-message'
+p79
+aS'power-state-change'
+p80
+asS'disable_root'
+p81
+I01
+sS'syslog_fix_perms'
+p82
+S'syslog:adm'
+p83
+sS'log_cfgs'
+p84
+(lp85
+(lp86
+S'[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n'
+p87
+aS'[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=("/dev/log", handlers.SysLogHandler.LOG_USER)\n'
+p88
+aa(lp89
+g87
+aS"[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n"
+p90
+aasS'cloud_init_modules'
+p91
+(lp92
+S'migrator'
+p93
+aS'seed_random'
+p94
+aS'bootcmd'
+p95
+aS'write-files'
+p96
+aS'growpart'
+p97
+aS'resizefs'
+p98
+aS'set_hostname'
+p99
+aS'update_hostname'
+p100
+aS'update_etc_hosts'
+p101
+aS'ca-certs'
+p102
+aS'rsyslog'
+p103
+aS'users-groups'
+p104
+aS'ssh'
+p105
+asS'preserve_hostname'
+p106
+I00
+sS'_log'
+p107
+(lp108
+g87
+ag90
+ag88
+asS'datasource_list'
+p109
+(lp110
+S'NoCloud'
+p111
+aS'ConfigDrive'
+p112
+aS'OpenNebula'
+p113
+aS'Azure'
+p114
+aS'AltCloud'
+p115
+aS'OVF'
+p116
+aS'MAAS'
+p117
+aS'GCE'
+p118
+aS'OpenStack'
+p119
+aS'CloudSigma'
+p120
+aS'Ec2'
+p121
+aS'CloudStack'
+p122
+aS'SmartOS'
+p123
+aS'None'
+p124
+asS'vendor_data'
+p125
+(dp126
+S'prefix'
+p127
+(lp128
+sS'enabled'
+p129
+I01
+ssS'cloud_config_modules'
+p130
+(lp131
+S'emit_upstart'
+p132
+aS'disk_setup'
+p133
+aS'mounts'
+p134
+aS'ssh-import-id'
+p135
+aS'locale'
+p136
+aS'set-passwords'
+p137
+aS'grub-dpkg'
+p138
+aS'apt-pipelining'
+p139
+aS'apt-configure'
+p140
+aS'package-update-upgrade-install'
+p141
+aS'landscape'
+p142
+aS'timezone'
+p143
+aS'puppet'
+p144
+aS'chef'
+p145
+aS'salt-minion'
+p146
+aS'mcollective'
+p147
+aS'disable-ec2-metadata'
+p148
+aS'runcmd'
+p149
+aS'byobu'
+p150
+assg14
+(iemail.mime.multipart
+MIMEMultipart
+p151
+(dp152
+S'_headers'
+p153
+(lp154
+(S'Content-Type'
+p155
+S'multipart/mixed; boundary="===============4291038100093149247=="'
+tp156
+a(S'MIME-Version'
+p157
+S'1.0'
+p158
+tp159
+a(S'Number-Attachments'
+p160
+S'1'
+tp161
+asS'_payload'
+p162
+(lp163
+(iemail.mime.base
+MIMEBase
+p164
+(dp165
+g153
+(lp166
+(g157
+g158
+tp167
+a(S'Content-Type'
+p168
+S'text/x-not-multipart'
+tp169
+a(S'Content-Disposition'
+p170
+S'attachment; filename="part-001"'
+tp171
+asg162
+S''
+sS'_charset'
+p172
+NsS'_default_type'
+p173
+S'text/plain'
+p174
+sS'preamble'
+p175
+NsS'defects'
+p176
+(lp177
+sS'_unixfrom'
+p178
+NsS'epilogue'
+p179
+Nsbasg172
+Nsg173
+g174
+sg175
+Nsg176
+(lp180
+sg178
+Nsg179
+Nsbsg16
+S'#cloud-config\n{}\n\n'
+p181
+sg18
+S'Content-Type: multipart/mixed; boundary="===============1378281702283945349=="\nMIME-Version: 1.0\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script1.sh"\n\nIyEvYmluL3NoCgplY2hvICdoaScgPiAvdmFyL3RtcC9oaQo=\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script2.sh"\n\nIyEvYmluL2Jhc2gKCmVjaG8gJ2hpMicgPiAvdmFyL3RtcC9oaTIK\n\n--===============1378281702283945349==--\n\n#cloud-config\n# final_message: |\n# This is my final message!\n# $version\n# $timestamp\n# $datasource\n# $uptime\n# updates:\n# network:\n# when: [\'hotplug\']\n'
+p182
+sg29
+NsS'dsmode'
+p183
+S'net'
+p184
+sS'seed'
+p185
+S'/var/lib/cloud/seed/nocloud-net'
+p186
+sS'cmdline_id'
+p187
+S'ds=nocloud-net'
+p188
+sS'ud_proc'
+p189
+g1
+(ccloudinit.user_data
+UserDataProcessor
+p190
+g3
+NtRp191
+(dp192
+g6
+g8
+sS'ssl_details'
+p193
+(dp194
+sbsg50
+g186
+sS'ds_cfg'
+p195
+(dp196
+sS'distro'
+p197
+g1
+(ccloudinit.distros.ubuntu
+Distro
+p198
+g3
+NtRp199
+(dp200
+S'osfamily'
+p201
+S'debian'
+p202
+sS'_paths'
+p203
+g8
+sS'name'
+p204
+S'ubuntu'
+p205
+sS'_runner'
+p206
+g1
+(ccloudinit.helpers
+Runners
+p207
+g3
+NtRp208
+(dp209
+g6
+g8
+sS'sems'
+p210
+(dp211
+sbsS'_cfg'
+p212
+(dp213
+S'paths'
+p214
+(dp215
+g37
+g38
+sg39
+g40
+sg41
+g42
+ssS'default_user'
+p216
+(dp217
+S'shell'
+p218
+S'/bin/bash'
+p219
+sS'name'
+p220
+S'ubuntu'
+p221
+sS'sudo'
+p222
+(lp223
+S'ALL=(ALL) NOPASSWD:ALL'
+p224
+asS'lock_passwd'
+p225
+I01
+sS'gecos'
+p226
+S'Ubuntu'
+p227
+sS'groups'
+p228
+(lp229
+S'adm'
+p230
+aS'audio'
+p231
+aS'cdrom'
+p232
+aS'dialout'
+p233
+aS'dip'
+p234
+aS'floppy'
+p235
+aS'netdev'
+p236
+aS'plugdev'
+p237
+aS'sudo'
+p238
+aS'video'
+p239
+assS'package_mirrors'
+p240
+(lp241
+(dp242
+S'arches'
+p243
+(lp244
+S'i386'
+p245
+aS'amd64'
+p246
+asS'failsafe'
+p247
+(dp248
+S'security'
+p249
+S'http://security.ubuntu.com/ubuntu'
+p250
+sS'primary'
+p251
+S'http://archive.ubuntu.com/ubuntu'
+p252
+ssS'search'
+p253
+(dp254
+S'security'
+p255
+(lp256
+sS'primary'
+p257
+(lp258
+S'http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/'
+p259
+aS'http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/'
+p260
+aS'http://%(region)s.clouds.archive.ubuntu.com/ubuntu/'
+p261
+assa(dp262
+S'arches'
+p263
+(lp264
+S'armhf'
+p265
+aS'armel'
+p266
+aS'default'
+p267
+asS'failsafe'
+p268
+(dp269
+S'security'
+p270
+S'http://ports.ubuntu.com/ubuntu-ports'
+p271
+sS'primary'
+p272
+S'http://ports.ubuntu.com/ubuntu-ports'
+p273
+ssasS'ssh_svcname'
+p274
+S'ssh'
+p275
+ssbsS'metadata'
+p276
+(dp277
+g183
+g184
+sS'local-hostname'
+p278
+S'me'
+p279
+sS'instance-id'
+p280
+S'me'
+p281
+ssb. \ No newline at end of file
diff --git a/tests/integration_tests/bugs/test_gh868.py b/tests/integration_tests/bugs/test_gh868.py
new file mode 100644
index 00000000..838efca6
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh868.py
@@ -0,0 +1,20 @@
+"""Ensure no Traceback when 'chef_license' is set"""
+import pytest
+from tests.integration_tests.instances import IntegrationInstance
+
+
+USERDATA = """\
+#cloud-config
+chef:
+ install_type: omnibus
+ chef_license: accept
+ server_url: https://chef.yourorg.invalid
+ validation_name: some-validator
+"""
+
+
+@pytest.mark.adhoc  # Can't regularly reach out to the chef install script
+@pytest.mark.user_data(USERDATA)
+def test_chef_license(client: IntegrationInstance):
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Traceback' not in log
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index 3bbccb44..f2362b5d 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -142,12 +142,12 @@ class IntegrationCloud(ABC):
except (ValueError, IndexError):
return image.image_id
- def _perform_launch(self, launch_kwargs):
+ def _perform_launch(self, launch_kwargs, **kwargs):
pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs)
return pycloudlib_instance
def launch(self, user_data=None, launch_kwargs=None,
- settings=integration_settings):
+ settings=integration_settings, **kwargs):
if launch_kwargs is None:
launch_kwargs = {}
if self.settings.EXISTING_INSTANCE_ID:
@@ -157,22 +157,22 @@ class IntegrationCloud(ABC):
self.instance = self.cloud_instance.get_instance(
self.settings.EXISTING_INSTANCE_ID
)
- return
- kwargs = {
+ return self.instance
+ default_launch_kwargs = {
'image_id': self.image_id,
'user_data': user_data,
}
- kwargs.update(launch_kwargs)
+ launch_kwargs = {**default_launch_kwargs, **launch_kwargs}
log.info(
"Launching instance with launch_kwargs:\n%s",
- "\n".join("{}={}".format(*item) for item in kwargs.items())
+ "\n".join("{}={}".format(*item) for item in launch_kwargs.items())
)
with emit_dots_on_travis():
- pycloudlib_instance = self._perform_launch(kwargs)
+ pycloudlib_instance = self._perform_launch(launch_kwargs, **kwargs)
log.info('Launched instance: %s', pycloudlib_instance)
instance = self.get_instance(pycloudlib_instance, settings)
- if kwargs.get('wait', True):
+ if launch_kwargs.get('wait', True):
# If we aren't waiting, we can't rely on command execution here
log.info(
'cloud-init version: %s',
@@ -247,8 +247,15 @@ class OciCloud(IntegrationCloud):
integration_instance_cls = IntegrationOciInstance
def _get_cloud_instance(self):
+ if not integration_settings.ORACLE_AVAILABILITY_DOMAIN:
+ raise Exception(
+ 'ORACLE_AVAILABILITY_DOMAIN must be set to a valid '
+ 'availability domain. If using the oracle CLI, '
+ 'try `oci iam availability-domain list`'
+ )
return OCI(
- tag='oci-integration-test'
+ tag='oci-integration-test',
+ availability_domain=integration_settings.ORACLE_AVAILABILITY_DOMAIN
)
@@ -292,7 +299,7 @@ class _LxdIntegrationCloud(IntegrationCloud):
).format(**format_variables)
subp(command.split())
- def _perform_launch(self, launch_kwargs):
+ def _perform_launch(self, launch_kwargs, **kwargs):
launch_kwargs['inst_type'] = launch_kwargs.pop('instance_type', None)
wait = launch_kwargs.pop('wait', True)
release = launch_kwargs.pop('image_id')
@@ -310,6 +317,9 @@ class _LxdIntegrationCloud(IntegrationCloud):
)
if self.settings.CLOUD_INIT_SOURCE == 'IN_PLACE':
self._mount_source(pycloudlib_instance)
+ if 'lxd_setup' in kwargs:
+ log.info("Running callback specified by 'lxd_setup' mark")
+ kwargs['lxd_setup'](pycloudlib_instance)
pycloudlib_instance.start(wait=wait)
return pycloudlib_instance
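
The merged-kwargs handling above relies on dict-unpacking precedence: any key
the caller passes in launch_kwargs overrides the default. A minimal,
self-contained sketch of that behavior (values are hypothetical):

    # Later unpacking wins, so caller-supplied launch_kwargs override defaults.
    default_launch_kwargs = {
        'image_id': 'focal',    # hypothetical default
        'user_data': None,
    }
    launch_kwargs = {'user_data': '#cloud-config\n'}  # caller override
    merged = {**default_launch_kwargs, **launch_kwargs}
    assert merged == {'image_id': 'focal', 'user_data': '#cloud-config\n'}
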
diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
index 6f4ce8d3..5a543e39 100644
--- a/tests/integration_tests/conftest.py
+++ b/tests/integration_tests/conftest.py
@@ -213,6 +213,7 @@ def _client(request, fixture_utils, session_cloud: IntegrationCloud):
user_data = getter('user_data')
name = getter('instance_name')
lxd_config_dict = getter('lxd_config_dict')
+ lxd_setup = getter('lxd_setup')
lxd_use_exec = fixture_utils.closest_marker_args_or(
request, 'lxd_use_exec', None
)
@@ -238,9 +239,14 @@ def _client(request, fixture_utils, session_cloud: IntegrationCloud):
# run anywhere else. A failure flags up this discrepancy.
pytest.fail(XENIAL_LXD_VM_EXEC_MSG)
launch_kwargs["execute_via_ssh"] = False
+ local_launch_kwargs = {}
+ if lxd_setup is not None:
+ if not isinstance(session_cloud, _LxdIntegrationCloud):
+            pytest.skip('lxd_setup requires LXD')
+ local_launch_kwargs['lxd_setup'] = lxd_setup
with session_cloud.launch(
- user_data=user_data, launch_kwargs=launch_kwargs
+ user_data=user_data, launch_kwargs=launch_kwargs, **local_launch_kwargs
) as instance:
if lxd_use_exec is not None:
# Existing instances are not affected by the launch kwargs, so
diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py
index 0703be58..e4a790c2 100644
--- a/tests/integration_tests/integration_settings.py
+++ b/tests/integration_tests/integration_settings.py
@@ -98,6 +98,13 @@ KEYPAIR_NAME = None
OPENSTACK_NETWORK = None
##################################################################
+# OCI SETTINGS
+##################################################################
+# Availability domain to use for Oracle. Should be one of the names found
+# in `oci iam availability-domain list`
+ORACLE_AVAILABILITY_DOMAIN = None
+
+##################################################################
# USER SETTINGS OVERRIDES
##################################################################
# Bring in any user-file defined settings
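
As a usage sketch for the new setting: the "user-file defined settings" the
comment above refers to can supply ORACLE_AVAILABILITY_DOMAIN. The file name
below is an assumption, not shown in this patch:

    # tests/integration_tests/user_settings.py (assumed override file)
    PLATFORM = 'oci'                              # hypothetical
    ORACLE_AVAILABILITY_DOMAIN = 'TGjA:PHX-AD-1'  # hypothetical value taken
                                                  # from `oci iam availability-domain list`
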
diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py
new file mode 100644
index 00000000..27f3c074
--- /dev/null
+++ b/tests/integration_tests/modules/test_combined.py
@@ -0,0 +1,195 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""A set of somewhat unrelated tests that can be combined into a single
+instance launch. Generally tests should only be added here if a failure
+of the test would be unlikely to affect the running of another test using
+the same instance launch. Most independent module coherence tests can go
+here.
+"""
+import json
+import pytest
+import re
+from datetime import date
+
+from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_ordered_items_in_text
+
+USER_DATA = """\
+## template: jinja
+#cloud-config
+apt:
+ primary:
+ - arches: [default]
+ uri: http://us.archive.ubuntu.com/ubuntu/
+byobu_by_default: enable
+final_message: |
+ This is my final message!
+ $version
+ $timestamp
+ $datasource
+ $uptime
+locale: en_GB.UTF-8
+locale_configfile: /etc/default/locale
+ntp:
+ servers: ['ntp.ubuntu.com']
+runcmd:
+ - echo {{ds.meta_data.local_hostname}} > /var/tmp/runcmd_output
+ - echo {{merged_cfg.def_log_file}} >> /var/tmp/runcmd_output
+"""
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(USER_DATA)
+class TestCombined:
+ def test_final_message(self, class_client: IntegrationInstance):
+ """Test that final_message module works as expected.
+
+ Also tests LP 1511485: final_message is silent
+ """
+ client = class_client
+ log = client.read_from_file('/var/log/cloud-init.log')
+ today = date.today().strftime('%a, %d %b %Y')
+ expected = (
+ 'This is my final message!\n'
+ r'\d+\.\d+.*\n'
+ '{}.*\n'
+ 'DataSource.*\n'
+ r'\d+\.\d+'
+ ).format(today)
+
+ assert re.search(expected, log)
+
+ def test_ntp_with_apt(self, class_client: IntegrationInstance):
+ """LP #1628337.
+
+ cloud-init tries to install NTP before even
+ configuring the archives.
+ """
+ client = class_client
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'W: Failed to fetch' not in log
+ assert 'W: Some index files failed to download' not in log
+ assert 'E: Unable to locate package ntp' not in log
+
+ def test_byobu(self, class_client: IntegrationInstance):
+ """Test byobu configured as enabled by default."""
+ client = class_client
+ assert client.execute('test -e "/etc/byobu/autolaunch"').ok
+
+ def test_configured_locale(self, class_client: IntegrationInstance):
+ """Test locale can be configured correctly."""
+ client = class_client
+ default_locale = client.read_from_file('/etc/default/locale')
+ assert 'LANG=en_GB.UTF-8' in default_locale
+
+ locale_a = client.execute('locale -a')
+ verify_ordered_items_in_text([
+ 'en_GB.utf8',
+ 'en_US.utf8'
+ ], locale_a)
+
+ locale_gen = client.execute(
+ "cat /etc/locale.gen | grep -v '^#' | uniq"
+ )
+ verify_ordered_items_in_text([
+ 'en_GB.UTF-8',
+ 'en_US.UTF-8'
+ ], locale_gen)
+
+ def test_runcmd_with_variable_substitution(
+ self, class_client: IntegrationInstance
+ ):
+ """Test runcmd, while including jinja substitution.
+
+        Ensure we can also substitute variables from instance-data-sensitive.
+ LP: #1931392
+ """
+ client = class_client
+ expected = [
+ client.execute('hostname').stdout.strip(),
+ '/var/log/cloud-init.log',
+ ]
+ output = client.read_from_file('/var/tmp/runcmd_output')
+ verify_ordered_items_in_text(expected, output)
+
+ def test_no_problems(self, class_client: IntegrationInstance):
+ """Test no errors, warnings, or tracebacks"""
+ client = class_client
+ status_file = client.read_from_file('/run/cloud-init/status.json')
+ status_json = json.loads(status_file)['v1']
+ for stage in ('init', 'init-local', 'modules-config', 'modules-final'):
+ assert status_json[stage]['errors'] == []
+ result_file = client.read_from_file('/run/cloud-init/result.json')
+ result_json = json.loads(result_file)['v1']
+ assert result_json['errors'] == []
+
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'WARN' not in log
+ assert 'Traceback' not in log
+
+ def _check_common_metadata(self, data):
+ assert data['base64_encoded_keys'] == []
+ assert data['merged_cfg'] == 'redacted for non-root user'
+
+ image_spec = ImageSpecification.from_os_image()
+ assert data['sys_info']['dist'][0] == image_spec.os
+
+ v1_data = data['v1']
+ assert re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release'])
+ assert v1_data['variant'] == image_spec.os
+ assert v1_data['distro'] == image_spec.os
+ assert v1_data['distro_release'] == image_spec.release
+ assert v1_data['machine'] == 'x86_64'
+        assert re.match(r'3\.\d+\.\d+', v1_data['python_version'])
+
+ @pytest.mark.lxd_container
+ def test_instance_json_lxd(self, class_client: IntegrationInstance):
+ client = class_client
+ instance_json_file = client.read_from_file(
+ '/run/cloud-init/instance-data.json')
+
+ data = json.loads(instance_json_file)
+ self._check_common_metadata(data)
+ v1_data = data['v1']
+ assert v1_data['cloud_name'] == 'unknown'
+ assert v1_data['platform'] == 'lxd'
+ assert v1_data['subplatform'] == (
+ 'seed-dir (/var/lib/cloud/seed/nocloud-net)')
+ assert v1_data['availability_zone'] is None
+ assert v1_data['instance_id'] == client.instance.name
+ assert v1_data['local_hostname'] == client.instance.name
+ assert v1_data['region'] is None
+
+ @pytest.mark.lxd_vm
+ def test_instance_json_lxd_vm(self, class_client: IntegrationInstance):
+ client = class_client
+ instance_json_file = client.read_from_file(
+ '/run/cloud-init/instance-data.json')
+
+ data = json.loads(instance_json_file)
+ self._check_common_metadata(data)
+ v1_data = data['v1']
+ assert v1_data['cloud_name'] == 'unknown'
+ assert v1_data['platform'] == 'lxd'
+ assert v1_data['subplatform'] == (
+ 'seed-dir (/var/lib/cloud/seed/nocloud-net)')
+ assert v1_data['availability_zone'] is None
+ assert v1_data['instance_id'] == client.instance.name
+ assert v1_data['local_hostname'] == client.instance.name
+ assert v1_data['region'] is None
+
+ @pytest.mark.ec2
+ def test_instance_json_ec2(self, class_client: IntegrationInstance):
+ client = class_client
+ instance_json_file = client.read_from_file(
+ '/run/cloud-init/instance-data.json')
+ data = json.loads(instance_json_file)
+ v1_data = data['v1']
+ assert v1_data['cloud_name'] == 'aws'
+ assert v1_data['platform'] == 'ec2'
+ assert v1_data['subplatform'].startswith('metadata')
+ assert v1_data[
+ 'availability_zone'] == client.instance.availability_zone
+ assert v1_data['instance_id'] == client.instance.name
+ assert v1_data['local_hostname'].startswith('ip-')
+ assert v1_data['region'] == client.cloud.cloud_instance.region
diff --git a/tests/integration_tests/modules/test_command_output.py b/tests/integration_tests/modules/test_command_output.py
new file mode 100644
index 00000000..15033642
--- /dev/null
+++ b/tests/integration_tests/modules/test_command_output.py
@@ -0,0 +1,23 @@
+"""Integration test for output redirection.
+
+This test redirects the output of a command to a file and then checks the file.
+
+(This is ported from
+``tests/cloud_tests/testcases/main/command_output_simple.yaml``.)"""
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+
+USER_DATA = """\
+#cloud-config
+output: { all: "| tee -a /var/log/cloud-init-test-output" }
+final_message: "should be last line in cloud-init-test-output file"
+"""
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(USER_DATA)
+def test_command_output(client: IntegrationInstance):
+ log = client.read_from_file('/var/log/cloud-init-test-output')
+ assert 'should be last line in cloud-init-test-output file' in log
diff --git a/tests/integration_tests/modules/test_disk_setup.py b/tests/integration_tests/modules/test_disk_setup.py
new file mode 100644
index 00000000..1fc96c52
--- /dev/null
+++ b/tests/integration_tests/modules/test_disk_setup.py
@@ -0,0 +1,192 @@
+import json
+import os
+import pytest
+from uuid import uuid4
+from pycloudlib.lxd.instance import LXDInstance
+
+from cloudinit.subp import subp
+from tests.integration_tests.instances import IntegrationInstance
+
+DISK_PATH = '/tmp/test_disk_setup_{}'.format(uuid4())
+
+
+def setup_and_mount_lxd_disk(instance: LXDInstance):
+ subp('lxc config device add {} test-disk-setup-disk disk source={}'.format(
+ instance.name, DISK_PATH).split())
+
+
+@pytest.fixture
+def create_disk():
+ # 640k should be enough for anybody
+ subp('dd if=/dev/zero of={} bs=1k count=640'.format(DISK_PATH).split())
+ yield
+ os.remove(DISK_PATH)
+
+
+ALIAS_USERDATA = """\
+#cloud-config
+device_aliases:
+ my_alias: /dev/sdb
+disk_setup:
+ my_alias:
+ table_type: mbr
+ layout: [50, 50]
+ overwrite: True
+fs_setup:
+- label: fs1
+ device: my_alias.1
+ filesystem: ext4
+- label: fs2
+ device: my_alias.2
+ filesystem: ext4
+mounts:
+- ["my_alias.1", "/mnt1"]
+- ["my_alias.2", "/mnt2"]
+"""
+
+
+@pytest.mark.user_data(ALIAS_USERDATA)
+@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk)
+@pytest.mark.ubuntu
+@pytest.mark.lxd_vm
+class TestDeviceAliases:
+ """Test devices aliases work on disk setup/mount"""
+
+ def test_device_alias(self, create_disk, client: IntegrationInstance):
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert (
+ "updated disk_setup device entry 'my_alias' to '/dev/sdb'"
+ ) in log
+ assert 'changed my_alias.1 => /dev/sdb1' in log
+ assert 'changed my_alias.2 => /dev/sdb2' in log
+ assert 'WARN' not in log
+ assert 'Traceback' not in log
+
+ lsblk = json.loads(client.execute('lsblk --json'))
+ sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0]
+ assert len(sdb['children']) == 2
+ assert sdb['children'][0]['name'] == 'sdb1'
+ assert sdb['children'][0]['mountpoint'] == '/mnt1'
+ assert sdb['children'][1]['name'] == 'sdb2'
+ assert sdb['children'][1]['mountpoint'] == '/mnt2'
+
+
+PARTPROBE_USERDATA = """\
+#cloud-config
+disk_setup:
+ /dev/sdb:
+ table_type: mbr
+ layout: [50, 50]
+ overwrite: True
+fs_setup:
+ - label: test
+ device: /dev/sdb1
+ filesystem: ext4
+ - label: test2
+ device: /dev/sdb2
+ filesystem: ext4
+mounts:
+- ["/dev/sdb1", "/mnt1"]
+- ["/dev/sdb2", "/mnt2"]
+"""
+
+UPDATED_PARTPROBE_USERDATA = """\
+#cloud-config
+disk_setup:
+ /dev/sdb:
+ table_type: mbr
+ layout: [100]
+ overwrite: True
+fs_setup:
+ - label: test3
+ device: /dev/sdb1
+ filesystem: ext4
+mounts:
+- ["/dev/sdb1", "/mnt3"]
+"""
+
+
+@pytest.mark.user_data(PARTPROBE_USERDATA)
+@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk)
+@pytest.mark.ubuntu
+@pytest.mark.lxd_vm
+class TestPartProbeAvailability:
+ """Test disk setup works with partprobe
+
+ Disk setup can run successfully on a mounted partition when
+ partprobe is being used.
+
+ lp-1920939
+ """
+
+ def _verify_first_disk_setup(self, client, log):
+ assert 'Traceback' not in log
+ assert 'WARN' not in log
+ lsblk = json.loads(client.execute('lsblk --json'))
+ sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0]
+ assert len(sdb['children']) == 2
+ assert sdb['children'][0]['name'] == 'sdb1'
+ assert sdb['children'][0]['mountpoint'] == '/mnt1'
+ assert sdb['children'][1]['name'] == 'sdb2'
+ assert sdb['children'][1]['mountpoint'] == '/mnt2'
+
+ # Not bionic or xenial because the LXD agent gets in the way of us
+ # changing the userdata
+ @pytest.mark.not_bionic
+ @pytest.mark.not_xenial
+ def test_disk_setup_when_mounted(
+ self, create_disk, client: IntegrationInstance
+ ):
+ """Test lp-1920939.
+
+ We insert an extra disk into our VM, format it to have two partitions,
+ modify our cloud config to mount devices before disk setup, and modify
+ our userdata to setup a single partition on the disk.
+
+ This allows cloud-init to attempt disk setup on a mounted partition.
+ When blockdev is in use, it will fail with
+ "blockdev: ioctl error on BLKRRPART: Device or resource busy" along
+ with a warning and a traceback. When partprobe is in use, everything
+ should work successfully.
+ """
+ log = client.read_from_file('/var/log/cloud-init.log')
+ self._verify_first_disk_setup(client, log)
+
+ # Update our userdata and cloud.cfg to mount then perform new disk
+ # setup
+ client.write_to_file(
+ '/var/lib/cloud/seed/nocloud-net/user-data',
+ UPDATED_PARTPROBE_USERDATA,
+ )
+ client.execute(
+ "sed -i 's/write-files/write-files\\n - mounts/' "
+ "/etc/cloud/cloud.cfg"
+ )
+
+ client.execute('cloud-init clean --logs')
+ client.restart()
+
+        # Re-read the log and assert the new setup works as expected
+        log = client.read_from_file('/var/log/cloud-init.log')
+        assert 'Traceback' not in log and 'WARN' not in log
+
+ lsblk = json.loads(client.execute('lsblk --json'))
+ sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0]
+ assert len(sdb['children']) == 1
+ assert sdb['children'][0]['name'] == 'sdb1'
+ assert sdb['children'][0]['mountpoint'] == '/mnt3'
+
+ def test_disk_setup_no_partprobe(
+ self, create_disk, client: IntegrationInstance
+ ):
+ """Ensure disk setup still works as expected without partprobe."""
+ # We can't do this part in a bootcmd because the path has already
+ # been found by the time we get to the bootcmd
+ client.execute('rm $(which partprobe)')
+ client.execute('cloud-init clean --logs')
+ client.restart()
+
+ log = client.read_from_file('/var/log/cloud-init.log')
+ self._verify_first_disk_setup(client, log)
+
+ assert 'partprobe' not in log
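
The assertions in this file walk the `lsblk --json` structure; a
self-contained sketch of the same extraction against abbreviated,
hypothetical output:

    import json

    # Abbreviated, hypothetical `lsblk --json` output
    lsblk_output = '''
    {"blockdevices": [
      {"name": "sdb", "children": [
        {"name": "sdb1", "mountpoint": "/mnt1"},
        {"name": "sdb2", "mountpoint": "/mnt2"}]}
    ]}
    '''
    lsblk = json.loads(lsblk_output)
    sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0]
    assert sdb['children'][0]['mountpoint'] == '/mnt1'
    assert sdb['children'][1]['name'] == 'sdb2'
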
diff --git a/tests/integration_tests/modules/test_hotplug.py b/tests/integration_tests/modules/test_hotplug.py
new file mode 100644
index 00000000..a42d1c8c
--- /dev/null
+++ b/tests/integration_tests/modules/test_hotplug.py
@@ -0,0 +1,102 @@
+import pytest
+import time
+import yaml
+from collections import namedtuple
+
+from tests.integration_tests.instances import IntegrationInstance
+
+USER_DATA = """\
+#cloud-config
+updates:
+ network:
+ when: ['hotplug']
+"""
+
+ip_addr = namedtuple('ip_addr', 'interface state ip4 ip6')
+
+
+def _wait_till_hotplug_complete(client, expected_runs=1):
+ for _ in range(60):
+ log = client.read_from_file('/var/log/cloud-init.log')
+ if log.count('Exiting hotplug handler') == expected_runs:
+ return log
+ time.sleep(1)
+ raise Exception('Waiting for hotplug handler failed')
+
+
+def _get_ip_addr(client):
+ ips = []
+ lines = client.execute('ip --brief addr').split('\n')
+ for line in lines:
+ attributes = line.split()
+ interface, state = attributes[0], attributes[1]
+ ip4_cidr = attributes[2] if len(attributes) > 2 else None
+ ip6_cidr = attributes[3] if len(attributes) > 3 else None
+ ip4 = ip4_cidr.split('/')[0] if ip4_cidr else None
+ ip6 = ip6_cidr.split('/')[0] if ip6_cidr else None
+ ip = ip_addr(interface, state, ip4, ip6)
+ ips.append(ip)
+ return ips
+
+
+@pytest.mark.openstack
+@pytest.mark.user_data(USER_DATA)
+def test_hotplug_add_remove(client: IntegrationInstance):
+ ips_before = _get_ip_addr(client)
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Exiting hotplug handler' not in log
+
+ # Add new NIC
+ added_ip = client.instance.add_network_interface()
+ _wait_till_hotplug_complete(client, expected_runs=2)
+ ips_after_add = _get_ip_addr(client)
+ new_addition = [ip for ip in ips_after_add if ip.ip4 == added_ip][0]
+
+ assert len(ips_after_add) == len(ips_before) + 1
+ assert added_ip not in [ip.ip4 for ip in ips_before]
+ assert added_ip in [ip.ip4 for ip in ips_after_add]
+ assert new_addition.state == 'UP'
+
+ netplan_cfg = client.read_from_file('/etc/netplan/50-cloud-init.yaml')
+ config = yaml.safe_load(netplan_cfg)
+ assert new_addition.interface in config['network']['ethernets']
+
+ # Remove new NIC
+ client.instance.remove_network_interface(added_ip)
+ _wait_till_hotplug_complete(client, expected_runs=4)
+ ips_after_remove = _get_ip_addr(client)
+ assert len(ips_after_remove) == len(ips_before)
+ assert added_ip not in [ip.ip4 for ip in ips_after_remove]
+
+ netplan_cfg = client.read_from_file('/etc/netplan/50-cloud-init.yaml')
+ config = yaml.safe_load(netplan_cfg)
+ assert new_addition.interface not in config['network']['ethernets']
+
+ assert 'enabled' == client.execute(
+ 'cloud-init devel hotplug-hook -s net query'
+ )
+
+
+@pytest.mark.openstack
+def test_no_hotplug_in_userdata(client: IntegrationInstance):
+ ips_before = _get_ip_addr(client)
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Exiting hotplug handler' not in log
+
+ # Add new NIC
+ client.instance.add_network_interface()
+ _wait_till_hotplug_complete(client)
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert "Event Denied: scopes=['network'] EventType=hotplug" in log
+
+ ips_after_add = _get_ip_addr(client)
+ if len(ips_after_add) == len(ips_before) + 1:
+ # We can see the device, but it should not have been brought up
+ new_ip = [ip for ip in ips_after_add if ip not in ips_before][0]
+ assert new_ip.state == 'DOWN'
+ else:
+ assert len(ips_after_add) == len(ips_before)
+
+ assert 'disabled' == client.execute(
+ 'cloud-init devel hotplug-hook -s net query'
+ )
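
_get_ip_addr above parses `ip --brief addr` output line by line; a standalone
sketch of the same parsing against hypothetical output:

    from collections import namedtuple

    ip_addr = namedtuple('ip_addr', 'interface state ip4 ip6')

    # Hypothetical `ip --brief addr` output: interface, state, then addresses
    sample = ('lo    UNKNOWN  127.0.0.1/8  ::1/128\n'
              'eth0  UP       10.0.0.5/24  fe80::1/64')
    ips = []
    for line in sample.split('\n'):
        attributes = line.split()
        ip4_cidr = attributes[2] if len(attributes) > 2 else None
        ip6_cidr = attributes[3] if len(attributes) > 3 else None
        ips.append(ip_addr(
            attributes[0], attributes[1],
            ip4_cidr.split('/')[0] if ip4_cidr else None,
            ip6_cidr.split('/')[0] if ip6_cidr else None))
    assert ips[1] == ip_addr('eth0', 'UP', '10.0.0.5', 'fe80::1')
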
diff --git a/tests/integration_tests/modules/test_ntp_servers.py b/tests/integration_tests/modules/test_ntp_servers.py
index e72389c1..7a799139 100644
--- a/tests/integration_tests/modules/test_ntp_servers.py
+++ b/tests/integration_tests/modules/test_ntp_servers.py
@@ -1,15 +1,19 @@
-"""Integration test for the ntp module's ``servers`` functionality with ntp.
+"""Integration test for the ntp module's ntp functionality.
This test specifies the use of the `ntp` NTP client, and ensures that the given
NTP servers are configured as expected.
-(This is ported from ``tests/cloud_tests/testcases/modules/ntp_servers.yaml``.)
+(This is ported from ``tests/cloud_tests/testcases/modules/ntp_servers.yaml``,
+``tests/cloud_tests/testcases/modules/ntp_pools.yaml``,
+and ``tests/cloud_tests/testcases/modules/ntp_chrony.yaml``)
"""
import re
import yaml
import pytest
+from tests.integration_tests.instances import IntegrationInstance
+
USER_DATA = """\
#cloud-config
ntp:
@@ -17,21 +21,26 @@ ntp:
servers:
- 172.16.15.14
- 172.16.17.18
+ pools:
+ - 0.cloud-init.mypool
+ - 1.cloud-init.mypool
+ - 172.16.15.15
"""
EXPECTED_SERVERS = yaml.safe_load(USER_DATA)["ntp"]["servers"]
+EXPECTED_POOLS = yaml.safe_load(USER_DATA)["ntp"]["pools"]
@pytest.mark.ci
@pytest.mark.user_data(USER_DATA)
class TestNtpServers:
- def test_ntp_installed(self, class_client):
+ def test_ntp_installed(self, class_client: IntegrationInstance):
"""Test that `ntpd --version` succeeds, indicating installation."""
- result = class_client.execute("ntpd --version")
- assert 0 == result.return_code
+ assert class_client.execute("ntpd --version").ok
- def test_dist_config_file_is_empty(self, class_client):
+ def test_dist_config_file_is_empty(self,
+ class_client: IntegrationInstance):
"""Test that the distributed config file is empty.
(This test is skipped on all currently supported Ubuntu releases, so
@@ -42,7 +51,7 @@ class TestNtpServers:
dist_file = class_client.read_from_file("/etc/ntp.conf.dist")
assert 0 == len(dist_file.strip().splitlines())
- def test_ntp_entries(self, class_client):
+ def test_ntp_entries(self, class_client: IntegrationInstance):
ntp_conf = class_client.read_from_file("/etc/ntp.conf")
for expected_server in EXPECTED_SERVERS:
assert re.search(
@@ -50,9 +59,69 @@ class TestNtpServers:
ntp_conf,
re.MULTILINE
)
+ for expected_pool in EXPECTED_POOLS:
+ assert re.search(
+ r"^pool {} iburst".format(expected_pool),
+ ntp_conf,
+ re.MULTILINE
+ )
- def test_ntpq_servers(self, class_client):
+ def test_ntpq_servers(self, class_client: IntegrationInstance):
result = class_client.execute("ntpq -p -w -n")
assert result.ok
- for expected_server in EXPECTED_SERVERS:
- assert expected_server in result.stdout
+ for expected_server_or_pool in [*EXPECTED_SERVERS, *EXPECTED_POOLS]:
+ assert expected_server_or_pool in result.stdout
+
+
+CHRONY_DATA = """\
+#cloud-config
+ntp:
+ enabled: true
+ ntp_client: chrony
+"""
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(CHRONY_DATA)
+def test_chrony(client: IntegrationInstance):
+ if client.execute('test -f /etc/chrony.conf').ok:
+ chrony_conf = '/etc/chrony.conf'
+ else:
+ chrony_conf = '/etc/chrony/chrony.conf'
+ contents = client.read_from_file(chrony_conf)
+ assert '.pool.ntp.org' in contents
+
+
+TIMESYNCD_DATA = """\
+#cloud-config
+ntp:
+ enabled: true
+ ntp_client: systemd-timesyncd
+"""
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(TIMESYNCD_DATA)
+def test_timesyncd(client: IntegrationInstance):
+ contents = client.read_from_file(
+ '/etc/systemd/timesyncd.conf.d/cloud-init.conf'
+ )
+ assert '.pool.ntp.org' in contents
+
+
+EMPTY_NTP = """\
+#cloud-config
+ntp:
+ ntp_client: ntp
+ pools: []
+ servers: []
+"""
+
+
+@pytest.mark.user_data(EMPTY_NTP)
+def test_empty_ntp(client: IntegrationInstance):
+ assert client.execute('ntpd --version').ok
+ assert client.execute('test -f /etc/ntp.conf.dist').failed
+ assert 'pool.ntp.org iburst' in client.execute(
+ 'grep -v "^#" /etc/ntp.conf'
+ )
diff --git a/tests/integration_tests/modules/test_persistence.py b/tests/integration_tests/modules/test_persistence.py
new file mode 100644
index 00000000..00fdeaea
--- /dev/null
+++ b/tests/integration_tests/modules/test_persistence.py
@@ -0,0 +1,30 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Test the behavior of loading/discarding pickle data"""
+from pathlib import Path
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import (
+ ASSETS_DIR,
+ verify_ordered_items_in_text,
+)
+
+
+PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl')
+TEST_PICKLE = ASSETS_DIR / 'trusty_with_mime.pkl'
+
+
+@pytest.mark.lxd_container
+def test_log_message_on_missing_version_file(client: IntegrationInstance):
+ client.push_file(TEST_PICKLE, PICKLE_PATH)
+ client.restart()
+ assert client.execute('cloud-init status --wait').ok
+ log = client.read_from_file('/var/log/cloud-init.log')
+ verify_ordered_items_in_text([
+ "Unable to unpickle datasource: 'MIMEMultipart' object has no "
+ "attribute 'policy'. Ignoring current cache.",
+ 'no cache found',
+ 'Searching for local data source',
+ 'SUCCESS: found local data from DataSourceNoCloud'
+ ], log)
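
The "Unable to unpickle datasource ... Ignoring current cache." message
asserted above implies a defensive-unpickling pattern on the cloud-init side.
A hedged sketch of that pattern (not cloud-init's actual implementation):

    import pickle

    def load_cached_datasource(blob):
        # Sketch only: on any unpickle failure, log and fall back to a
        # fresh datasource search, as the asserted log lines suggest.
        try:
            return pickle.loads(blob)
        except Exception as e:
            print('Unable to unpickle datasource: %s. '
                  'Ignoring current cache.' % e)
            return None
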
diff --git a/tests/integration_tests/modules/test_runcmd.py b/tests/integration_tests/modules/test_runcmd.py
deleted file mode 100644
index 50d1851e..00000000
--- a/tests/integration_tests/modules/test_runcmd.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""Integration test for the runcmd module.
-
-This test specifies a command to be executed by the ``runcmd`` module
-and then checks if that command was executed during boot.
-
-(This is ported from
-``tests/cloud_tests/testcases/modules/runcmd.yaml``.)"""
-
-import pytest
-
-
-USER_DATA = """\
-#cloud-config
-runcmd:
- - echo cloud-init run cmd test > /var/tmp/run_cmd
-"""
-
-
-@pytest.mark.ci
-class TestRuncmd:
-
- @pytest.mark.user_data(USER_DATA)
- def test_runcmd(self, client):
- runcmd_output = client.read_from_file("/var/tmp/run_cmd")
- assert runcmd_output.strip() == "cloud-init run cmd test"
diff --git a/tests/integration_tests/modules/test_snap.py b/tests/integration_tests/modules/test_snap.py
index 481edbaa..652efa68 100644
--- a/tests/integration_tests/modules/test_snap.py
+++ b/tests/integration_tests/modules/test_snap.py
@@ -4,7 +4,7 @@ This test specifies a command to be executed by the ``snap`` module
and then checks that the command was executed during boot.
(This is ported from
-``tests/cloud_tests/testcases/modules/runcmd.yaml``.)"""
+``tests/cloud_tests/testcases/modules/snap.yaml``.)"""
import pytest
diff --git a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
index b9b0d85e..e1946cb1 100644
--- a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
+++ b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
@@ -18,7 +18,7 @@ USER_DATA_SSH_AUTHKEY_DISABLE = """\
no_ssh_fingerprints: true
"""
-USER_DATA_SSH_AUTHKEY_ENABLE="""\
+USER_DATA_SSH_AUTHKEY_ENABLE = """\
#cloud-config
ssh_genkeytypes:
- ecdsa
diff --git a/tests/integration_tests/modules/test_ssh_import_id.py b/tests/integration_tests/modules/test_ssh_import_id.py
index 3db573b5..b90fe95f 100644
--- a/tests/integration_tests/modules/test_ssh_import_id.py
+++ b/tests/integration_tests/modules/test_ssh_import_id.py
@@ -12,6 +12,7 @@ TODO:
import pytest
+from tests.integration_tests.util import retry
USER_DATA = """\
#cloud-config
@@ -26,6 +27,11 @@ ssh_import_id:
class TestSshImportId:
@pytest.mark.user_data(USER_DATA)
+ # Retry is needed here because ssh import id is one of the last modules
+ # run, and it fires off a web request, then continues with the rest of
+ # cloud-init. It is possible cloud-init's status is "done" before the
+ # id's have been fully imported.
+ @retry(tries=30, delay=1)
def test_ssh_import_id(self, client):
ssh_output = client.read_from_file(
"/home/ubuntu/.ssh/authorized_keys")
diff --git a/tests/integration_tests/modules/test_ssh_keysfile.py b/tests/integration_tests/modules/test_ssh_keysfile.py
new file mode 100644
index 00000000..5c720578
--- /dev/null
+++ b/tests/integration_tests/modules/test_ssh_keysfile.py
@@ -0,0 +1,191 @@
+import paramiko
+import pytest
+from io import StringIO
+from paramiko.ssh_exception import SSHException
+
+from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import get_test_rsa_keypair
+
+TEST_USER1_KEYS = get_test_rsa_keypair('test1')
+TEST_USER2_KEYS = get_test_rsa_keypair('test2')
+TEST_DEFAULT_KEYS = get_test_rsa_keypair('test3')
+
+_USERDATA = """\
+#cloud-config
+bootcmd:
+ - {bootcmd}
+ssh_authorized_keys:
+ - {default}
+users:
+- default
+- name: test_user1
+ ssh_authorized_keys:
+ - {user1}
+- name: test_user2
+ ssh_authorized_keys:
+ - {user2}
+""".format(
+ bootcmd='{bootcmd}',
+ default=TEST_DEFAULT_KEYS.public_key,
+ user1=TEST_USER1_KEYS.public_key,
+ user2=TEST_USER2_KEYS.public_key,
+)
+
+
+def common_verify(client, expected_keys):
+ for user, filename, keys in expected_keys:
+ # Ensure key is in the key file
+ contents = client.read_from_file(filename)
+ if user in ['ubuntu', 'root']:
+ # Our personal public key gets added by pycloudlib
+ lines = contents.split('\n')
+ assert len(lines) == 2
+ assert keys.public_key.strip() in contents
+ else:
+ assert contents.strip() == keys.public_key.strip()
+
+ # Ensure we can actually connect
+ ssh = paramiko.SSHClient()
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ paramiko_key = paramiko.RSAKey.from_private_key(StringIO(
+ keys.private_key))
+
+ # Will fail with AuthenticationException if
+ # we cannot connect
+ ssh.connect(
+ client.instance.ip,
+ username=user,
+ pkey=paramiko_key,
+ look_for_keys=False,
+ allow_agent=False,
+ )
+
+    # Ensure other users can't connect using our key
+ other_users = [u[0] for u in expected_keys if u[2] != keys]
+ for other_user in other_users:
+ with pytest.raises(SSHException):
+ print('trying to connect as {} with key from {}'.format(
+ other_user, user))
+ ssh.connect(
+ client.instance.ip,
+ username=other_user,
+ pkey=paramiko_key,
+ look_for_keys=False,
+ allow_agent=False,
+ )
+
+ # Ensure we haven't messed with any /home permissions
+ # See LP: #1940233
+ home_dir = '/home/{}'.format(user)
+ # Home permissions aren't consistent between releases. On ubuntu
+ # this can change to 750 once focal is unsupported.
+ if ImageSpecification.from_os_image().release in ("bionic", "focal"):
+ home_perms = '755'
+ else:
+ home_perms = '750'
+ if user == 'root':
+ home_dir = '/root'
+ home_perms = '700'
+ assert '{} {}'.format(user, home_perms) == client.execute(
+ 'stat -c "%U %a" {}'.format(home_dir)
+ )
+ if client.execute("test -d {}/.ssh".format(home_dir)).ok:
+ assert '{} 700'.format(user) == client.execute(
+ 'stat -c "%U %a" {}/.ssh'.format(home_dir)
+ )
+ assert '{} 600'.format(user) == client.execute(
+ 'stat -c "%U %a" {}'.format(filename)
+ )
+
+ # Also ensure ssh-keygen works as expected
+ client.execute('mkdir {}/.ssh'.format(home_dir))
+ assert client.execute(
+ "ssh-keygen -b 2048 -t rsa -f {}/.ssh/id_rsa -q -N ''".format(
+ home_dir)
+ ).ok
+    assert client.execute('test -f {}/.ssh/id_rsa'.format(home_dir)).ok
+    assert client.execute('test -f {}/.ssh/id_rsa.pub'.format(home_dir)).ok
+
+ assert 'root 755' == client.execute('stat -c "%U %a" /home')
+
+
+DEFAULT_KEYS_USERDATA = _USERDATA.format(bootcmd='""')
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(DEFAULT_KEYS_USERDATA)
+def test_authorized_keys_default(client: IntegrationInstance):
+ expected_keys = [
+ ('test_user1', '/home/test_user1/.ssh/authorized_keys',
+ TEST_USER1_KEYS),
+ ('test_user2', '/home/test_user2/.ssh/authorized_keys',
+ TEST_USER2_KEYS),
+ ('ubuntu', '/home/ubuntu/.ssh/authorized_keys',
+ TEST_DEFAULT_KEYS),
+ ('root', '/root/.ssh/authorized_keys', TEST_DEFAULT_KEYS),
+ ]
+ common_verify(client, expected_keys)
+
+
+AUTHORIZED_KEYS2_USERDATA = _USERDATA.format(bootcmd=(
+ "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile "
+ "/etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' "
+ "/etc/ssh/sshd_config"))
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(AUTHORIZED_KEYS2_USERDATA)
+def test_authorized_keys2(client: IntegrationInstance):
+ expected_keys = [
+ ('test_user1', '/home/test_user1/.ssh/authorized_keys2',
+ TEST_USER1_KEYS),
+ ('test_user2', '/home/test_user2/.ssh/authorized_keys2',
+ TEST_USER2_KEYS),
+ ('ubuntu', '/home/ubuntu/.ssh/authorized_keys2',
+ TEST_DEFAULT_KEYS),
+ ('root', '/root/.ssh/authorized_keys2', TEST_DEFAULT_KEYS),
+ ]
+ common_verify(client, expected_keys)
+
+
+NESTED_KEYS_USERDATA = _USERDATA.format(bootcmd=(
+ "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile "
+ "/etc/ssh/authorized_keys %h/foo/bar/ssh/keys;' "
+ "/etc/ssh/sshd_config"))
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(NESTED_KEYS_USERDATA)
+def test_nested_keys(client: IntegrationInstance):
+ expected_keys = [
+ ('test_user1', '/home/test_user1/foo/bar/ssh/keys',
+ TEST_USER1_KEYS),
+ ('test_user2', '/home/test_user2/foo/bar/ssh/keys',
+ TEST_USER2_KEYS),
+ ('ubuntu', '/home/ubuntu/foo/bar/ssh/keys',
+ TEST_DEFAULT_KEYS),
+ ('root', '/root/foo/bar/ssh/keys', TEST_DEFAULT_KEYS),
+ ]
+ common_verify(client, expected_keys)
+
+
+EXTERNAL_KEYS_USERDATA = _USERDATA.format(bootcmd=(
+ "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile "
+ "/etc/ssh/authorized_keys /etc/ssh/authorized_keys/%u/keys;' "
+ "/etc/ssh/sshd_config"))
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(EXTERNAL_KEYS_USERDATA)
+def test_external_keys(client: IntegrationInstance):
+ expected_keys = [
+ ('test_user1', '/etc/ssh/authorized_keys/test_user1/keys',
+ TEST_USER1_KEYS),
+ ('test_user2', '/etc/ssh/authorized_keys/test_user2/keys',
+ TEST_USER2_KEYS),
+ ('ubuntu', '/etc/ssh/authorized_keys/ubuntu/keys',
+ TEST_DEFAULT_KEYS),
+ ('root', '/etc/ssh/authorized_keys/root/keys', TEST_DEFAULT_KEYS),
+ ]
+ common_verify(client, expected_keys)
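
get_test_rsa_keypair presumably loads the id_rsa.testN assets added earlier
in this patch; a hedged sketch of such a helper (names and paths are
assumptions):

    from collections import namedtuple
    from pathlib import Path

    KeyPair = namedtuple('KeyPair', 'public_key private_key')
    ASSETS_DIR = Path('tests/integration_tests/assets')  # assumed location

    def get_test_rsa_keypair(key_name='test1'):
        # e.g. assets/keys/id_rsa.test1 and assets/keys/id_rsa.test1.pub
        private_path = ASSETS_DIR / 'keys' / 'id_rsa.{}'.format(key_name)
        public_path = ASSETS_DIR / 'keys' / 'id_rsa.{}.pub'.format(key_name)
        return KeyPair(public_key=public_path.read_text(),
                       private_key=private_path.read_text())
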
diff --git a/tests/integration_tests/modules/test_user_events.py b/tests/integration_tests/modules/test_user_events.py
new file mode 100644
index 00000000..a45cad72
--- /dev/null
+++ b/tests/integration_tests/modules/test_user_events.py
@@ -0,0 +1,95 @@
+"""Test user-overridable events.
+
+This is currently limited to applying network config on BOOT events.
+"""
+
+import pytest
+import re
+import yaml
+
+from tests.integration_tests.instances import IntegrationInstance
+
+
+def _add_dummy_bridge_to_netplan(client: IntegrationInstance):
+ # Update netplan configuration to ensure it doesn't change on reboot
+ netplan = yaml.safe_load(
+ client.execute('cat /etc/netplan/50-cloud-init.yaml')
+ )
+    # Add a dummy bridge that does nothing
+ try:
+ netplan['network']['bridges']['dummy0'] = {'dhcp4': False}
+ except KeyError:
+ netplan['network']['bridges'] = {'dummy0': {'dhcp4': False}}
+
+ dumped_netplan = yaml.dump(netplan)
+ client.write_to_file('/etc/netplan/50-cloud-init.yaml', dumped_netplan)
+
+
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+@pytest.mark.ec2
+@pytest.mark.gce
+@pytest.mark.oci
+@pytest.mark.openstack
+@pytest.mark.not_xenial
+def test_boot_event_disabled_by_default(client: IntegrationInstance):
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Applying network configuration' in log
+ assert 'dummy0' not in client.execute('ls /sys/class/net')
+
+ _add_dummy_bridge_to_netplan(client)
+ client.execute('rm /var/log/cloud-init.log')
+
+ client.restart()
+ log2 = client.read_from_file('/var/log/cloud-init.log')
+
+ # We attempt to apply network config twice on every boot.
+ # Ensure neither time works.
+ assert 2 == len(
+ re.findall(r"Event Denied: scopes=\['network'\] EventType=boot[^-]",
+ log2)
+ )
+ assert 2 == log2.count(
+ "Event Denied: scopes=['network'] EventType=boot-legacy"
+ )
+ assert 2 == log2.count(
+ "No network config applied. Neither a new instance"
+ " nor datasource network update allowed"
+ )
+
+ assert 'dummy0' in client.execute('ls /sys/class/net')
+
+
+def _test_network_config_applied_on_reboot(client: IntegrationInstance):
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Applying network configuration' in log
+ assert 'dummy0' not in client.execute('ls /sys/class/net')
+
+ _add_dummy_bridge_to_netplan(client)
+ client.execute('rm /var/log/cloud-init.log')
+ client.restart()
+ log = client.read_from_file('/var/log/cloud-init.log')
+
+ assert 'Event Allowed: scope=network EventType=boot' in log
+ assert 'Applying network configuration' in log
+ assert 'dummy0' not in client.execute('ls /sys/class/net')
+
+
+@pytest.mark.azure
+@pytest.mark.not_xenial
+def test_boot_event_enabled_by_default(client: IntegrationInstance):
+ _test_network_config_applied_on_reboot(client)
+
+
+USER_DATA = """\
+#cloud-config
+updates:
+ network:
+ when: [boot]
+"""
+
+
+@pytest.mark.not_xenial
+@pytest.mark.user_data(USER_DATA)
+def test_boot_event_enabled(client: IntegrationInstance):
+ _test_network_config_applied_on_reboot(client)
diff --git a/tests/integration_tests/modules/test_version_change.py b/tests/integration_tests/modules/test_version_change.py
new file mode 100644
index 00000000..4e9ab63f
--- /dev/null
+++ b/tests/integration_tests/modules/test_version_change.py
@@ -0,0 +1,56 @@
+from pathlib import Path
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import ASSETS_DIR
+
+
+PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl')
+TEST_PICKLE = ASSETS_DIR / 'test_version_change.pkl'
+
+
+def _assert_no_pickle_problems(log):
+ assert 'Failed loading pickled blob' not in log
+ assert 'Traceback' not in log
+ assert 'WARN' not in log
+
+
+def test_reboot_without_version_change(client: IntegrationInstance):
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Python version change detected' not in log
+ assert 'Cache compatibility status is currently unknown.' not in log
+ _assert_no_pickle_problems(log)
+
+ client.restart()
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Python version change detected' not in log
+ assert 'Could not determine Python version used to write cache' not in log
+ _assert_no_pickle_problems(log)
+
+ # Now ensure that loading a bad pickle gives us problems
+ client.push_file(TEST_PICKLE, PICKLE_PATH)
+ client.restart()
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Failed loading pickled blob from {}'.format(PICKLE_PATH) in log
+
+
+def test_cache_purged_on_version_change(client: IntegrationInstance):
+ # Start by pushing the invalid pickle so we'll hit an error if the
+ # cache didn't actually get purged
+ client.push_file(TEST_PICKLE, PICKLE_PATH)
+ client.execute("echo '1.0' > /var/lib/cloud/data/python-version")
+ client.restart()
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Python version change detected. Purging cache' in log
+ _assert_no_pickle_problems(log)
+
+
+def test_log_message_on_missing_version_file(client: IntegrationInstance):
+ # Start by pushing a pickle so we can see the log message
+ client.push_file(TEST_PICKLE, PICKLE_PATH)
+ client.execute("rm /var/lib/cloud/data/python-version")
+ client.restart()
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert (
+ 'Writing python-version file. '
+ 'Cache compatibility status is currently unknown.'
+ ) in log
diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py
index 7478a1b9..376fcc96 100644
--- a/tests/integration_tests/test_upgrade.py
+++ b/tests/integration_tests/test_upgrade.py
@@ -1,17 +1,35 @@
+import json
import logging
import os
import pytest
-import time
-from pathlib import Path
from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud
-from tests.integration_tests.conftest import (
- get_validated_source,
- session_start_time,
-)
+from tests.integration_tests.conftest import get_validated_source
+
+
+LOG = logging.getLogger('integration_testing.test_upgrade')
+
+LOG_TEMPLATE = """\n\
+=== `systemd-analyze` before:
+{pre_systemd_analyze}
+=== `systemd-analyze` after:
+{post_systemd_analyze}
+=== `systemd-analyze blame` before (first 10 lines):
+{pre_systemd_blame}
+=== `systemd-analyze blame` after (first 10 lines):
+{post_systemd_blame}
-log = logging.getLogger('integration_testing')
+=== `cloud-init analyze show` before:
+{pre_analyze_totals}
+=== `cloud-init analyze show` after:
+{post_analyze_totals}
+
+=== `cloud-init analyze blame` before (first 10 lines):
+{pre_cloud_blame}
+=== `cloud-init analyze blame` after (first 10 lines):
+{post_cloud_blame}
+"""
UNSUPPORTED_INSTALL_METHOD_MSG = (
"Install method '{}' not supported for this test"
@@ -22,84 +40,101 @@ hostname: SRU-worked
"""
-def _output_to_compare(instance, file_path, netcfg_path):
- commands = [
- 'hostname',
- 'dpkg-query --show cloud-init',
- 'cat /run/cloud-init/result.json',
- # 'cloud-init init' helps us understand if our pickling upgrade paths
- # have broken across re-constitution of a cached datasource. Some
- # platforms invalidate their datasource cache on reboot, so we run
- # it here to ensure we get a dirty run.
- 'cloud-init init',
- 'grep Trace /var/log/cloud-init.log',
- 'cloud-id',
- 'cat {}'.format(netcfg_path),
- 'systemd-analyze',
- 'systemd-analyze blame',
- 'cloud-init analyze show',
- 'cloud-init analyze blame',
- ]
- with file_path.open('w') as f:
- for command in commands:
- f.write('===== {} ====='.format(command) + '\n')
- f.write(instance.execute(command) + '\n')
-
-
-def _restart(instance):
- # work around pad.lv/1908287
- instance.restart()
- if not instance.execute('cloud-init status --wait --long').ok:
- for _ in range(10):
- time.sleep(5)
- result = instance.execute('cloud-init status --wait --long')
- if result.ok:
- return
- raise Exception("Cloud-init didn't finish starting up")
-
-
-@pytest.mark.sru_2020_11
def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud):
source = get_validated_source(session_cloud)
if not source.installs_new_version():
pytest.skip(UNSUPPORTED_INSTALL_METHOD_MSG.format(source))
return # type checking doesn't understand that skip raises
+ if (ImageSpecification.from_os_image().release == 'bionic' and
+ session_cloud.settings.PLATFORM == 'lxd_vm'):
+ # The issues that we see on Bionic VMs don't appear anywhere
+ # else, including when calling KVM directly. It likely has to
+ # do with the extra lxd-agent setup happening on bionic.
+ # Given that we still have Bionic covered on all other platforms,
+ # the risk of skipping bionic here seems low enough.
+ pytest.skip("Upgrade test doesn't run on LXD VMs and bionic")
+ return
launch_kwargs = {
'image_id': session_cloud.released_image_id,
}
- image = ImageSpecification.from_os_image()
-
- # Get the paths to write test logs
- output_dir = Path(session_cloud.settings.LOCAL_LOG_PATH)
- output_dir.mkdir(parents=True, exist_ok=True)
- base_filename = 'test_upgrade_{platform}_{os}_{{stage}}_{time}.log'.format(
- platform=session_cloud.settings.PLATFORM,
- os=image.release,
- time=session_start_time,
- )
- before_path = output_dir / base_filename.format(stage='before')
- after_path = output_dir / base_filename.format(stage='after')
-
- # Get the network cfg file
- netcfg_path = '/dev/null'
- if image.os == 'ubuntu':
- netcfg_path = '/etc/netplan/50-cloud-init.yaml'
- if image.release == 'xenial':
- netcfg_path = '/etc/network/interfaces.d/50-cloud-init.cfg'
-
with session_cloud.launch(
launch_kwargs=launch_kwargs, user_data=USER_DATA,
) as instance:
- _output_to_compare(instance, before_path, netcfg_path)
+ # get pre values
+ pre_hostname = instance.execute('hostname')
+ pre_cloud_id = instance.execute('cloud-id')
+ pre_result = instance.execute('cat /run/cloud-init/result.json')
+ pre_network = instance.execute('cat /etc/netplan/50-cloud-init.yaml')
+ pre_systemd_analyze = instance.execute('systemd-analyze')
+ pre_systemd_blame = instance.execute('systemd-analyze blame')
+ pre_cloud_analyze = instance.execute('cloud-init analyze show')
+ pre_cloud_blame = instance.execute('cloud-init analyze blame')
+
+ # Ensure no issues pre-upgrade
+ assert not json.loads(pre_result)['v1']['errors']
+
+ log = instance.read_from_file('/var/log/cloud-init.log')
+ assert 'Traceback' not in log
+ assert 'WARN' not in log
+
+ # Upgrade and reboot
instance.install_new_cloud_init(source, take_snapshot=False)
instance.execute('hostname something-else')
- _restart(instance)
+ instance.restart()
assert instance.execute('cloud-init status --wait --long').ok
- _output_to_compare(instance, after_path, netcfg_path)
- log.info('Wrote upgrade test logs to %s and %s', before_path, after_path)
+ # 'cloud-init init' helps us understand if our pickling upgrade paths
+ # have broken across re-constitution of a cached datasource. Some
+ # platforms invalidate their datasource cache on reboot, so we run
+ # it here to ensure we get a dirty run.
+ assert instance.execute('cloud-init init').ok
+
+ # get post values
+ post_hostname = instance.execute('hostname')
+ post_cloud_id = instance.execute('cloud-id')
+ post_result = instance.execute('cat /run/cloud-init/result.json')
+ post_network = instance.execute('cat /etc/netplan/50-cloud-init.yaml')
+ post_systemd_analyze = instance.execute('systemd-analyze')
+ post_systemd_blame = instance.execute('systemd-analyze blame')
+ post_cloud_analyze = instance.execute('cloud-init analyze show')
+ post_cloud_blame = instance.execute('cloud-init analyze blame')
+
+ # Ensure no issues post-upgrade
+ assert not json.loads(post_result)['v1']['errors']
+
+ log = instance.read_from_file('/var/log/cloud-init.log')
+ assert 'Traceback' not in log
+ assert 'WARN' not in log
+
+ # Ensure important things stayed the same
+ assert pre_hostname == post_hostname
+ assert pre_cloud_id == post_cloud_id
+ assert pre_result == post_result
+ assert pre_network == post_network
+
+ # Calculate and log all the boot numbers
+ pre_analyze_totals = [
+ x for x in pre_cloud_analyze.splitlines()
+ if x.startswith('Finished stage') or x.startswith('Total Time')
+ ]
+ post_analyze_totals = [
+ x for x in post_cloud_analyze.splitlines()
+ if x.startswith('Finished stage') or x.startswith('Total Time')
+ ]
+
+ # pylint: disable=logging-format-interpolation
+ LOG.info(LOG_TEMPLATE.format(
+ pre_systemd_analyze=pre_systemd_analyze,
+ post_systemd_analyze=post_systemd_analyze,
+ pre_systemd_blame='\n'.join(pre_systemd_blame.splitlines()[:10]),
+ post_systemd_blame='\n'.join(post_systemd_blame.splitlines()[:10]),
+ pre_analyze_totals='\n'.join(pre_analyze_totals),
+ post_analyze_totals='\n'.join(post_analyze_totals),
+ pre_cloud_blame='\n'.join(pre_cloud_blame.splitlines()[:10]),
+ post_cloud_blame='\n'.join(post_cloud_blame.splitlines()[:10]),
+ ))
@pytest.mark.ci
diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py
index 3ef12358..80430eab 100644
--- a/tests/integration_tests/util.py
+++ b/tests/integration_tests/util.py
@@ -1,10 +1,18 @@
+import functools
import logging
import multiprocessing
import os
import time
from contextlib import contextmanager
+from collections import namedtuple
+from pathlib import Path
+
log = logging.getLogger('integration_testing')
+key_pair = namedtuple('key_pair', 'public_key private_key')
+
+ASSETS_DIR = Path('tests/integration_tests/assets')
+KEY_PATH = ASSETS_DIR / 'keys'
def verify_ordered_items_in_text(to_verify: list, text: str):
@@ -47,3 +55,42 @@ def emit_dots_on_travis():
yield
finally:
dot_process.terminate()
+
+
+def get_test_rsa_keypair(key_name: str = 'test1') -> key_pair:
+ private_key_path = KEY_PATH / 'id_rsa.{}'.format(key_name)
+ public_key_path = KEY_PATH / 'id_rsa.{}.pub'.format(key_name)
+ with public_key_path.open() as public_file:
+ public_key = public_file.read()
+ with private_key_path.open() as private_file:
+ private_key = private_file.read()
+ return key_pair(public_key, private_key)
+
+
+def retry(*, tries: int = 30, delay: int = 1):
+ """Decorator for retries.
+
+ Retry a function until code no longer raises an exception or
+ max tries is reached.
+
+ Example:
+ @retry(tries=5, delay=1)
+ def try_something_that_may_not_be_ready():
+ ...
+ """
+ def _retry(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ last_error = None
+ for _ in range(tries):
+ try:
+ return func(*args, **kwargs)
+ except Exception as e:
+ last_error = e
+ time.sleep(delay)
+ if last_error:
+ raise last_error
+ return wrapper
+ return _retry
diff --git a/tests/unittests/cmd/devel/test_hotplug_hook.py b/tests/unittests/cmd/devel/test_hotplug_hook.py
new file mode 100644
index 00000000..63d2490e
--- /dev/null
+++ b/tests/unittests/cmd/devel/test_hotplug_hook.py
@@ -0,0 +1,218 @@
+import pytest
+from collections import namedtuple
+from unittest import mock
+from unittest.mock import call
+
+from cloudinit.cmd.devel.hotplug_hook import handle_hotplug
+from cloudinit.distros import Distro
+from cloudinit.event import EventType
+from cloudinit.net.activators import NetworkActivator
+from cloudinit.net.network_state import NetworkState
+from cloudinit.sources import DataSource
+from cloudinit.stages import Init
+
+
+hotplug_args = namedtuple('hotplug_args', 'udevaction, subsystem, devpath')
+FAKE_MAC = '11:22:33:44:55:66'
+
+
+@pytest.yield_fixture
+def mocks():
+ m_init = mock.MagicMock(spec=Init)
+ m_distro = mock.MagicMock(spec=Distro)
+ m_datasource = mock.MagicMock(spec=DataSource)
+ m_datasource.distro = m_distro
+ m_init.datasource = m_datasource
+ m_init.fetch.return_value = m_datasource
+
+ read_sys_net = mock.patch(
+ 'cloudinit.cmd.devel.hotplug_hook.read_sys_net_safe',
+ return_value=FAKE_MAC
+ )
+
+ m_network_state = mock.MagicMock(spec=NetworkState)
+ parse_net = mock.patch(
+ 'cloudinit.cmd.devel.hotplug_hook.parse_net_config_data',
+ return_value=m_network_state
+ )
+
+ m_activator = mock.MagicMock(spec=NetworkActivator)
+ select_activator = mock.patch(
+ 'cloudinit.cmd.devel.hotplug_hook.activators.select_activator',
+ return_value=m_activator
+ )
+
+ sleep = mock.patch('time.sleep')
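+ # Patch time.sleep so handle_hotplug's retry backoff doesn't slow the
+ # suite; test_retry asserts the recorded sleep intervals.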
+
+ read_sys_net.start()
+ parse_net.start()
+ select_activator.start()
+ m_sleep = sleep.start()
+
+ yield namedtuple('mocks', 'm_init m_network_state m_activator m_sleep')(
+ m_init=m_init,
+ m_network_state=m_network_state,
+ m_activator=m_activator,
+ m_sleep=m_sleep,
+ )
+
+ read_sys_net.stop()
+ parse_net.stop()
+ select_activator.stop()
+ sleep.stop()
+
+
+class TestUnsupportedActions:
+ def test_unsupported_subsystem(self, mocks):
+ with pytest.raises(
+ Exception,
+ match='cannot handle events for subsystem: not_real'
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath='/dev/fake',
+ subsystem='not_real',
+ udevaction='add'
+ )
+
+ def test_unsupported_udevaction(self, mocks):
+ with pytest.raises(ValueError, match='Unknown action: not_real'):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath='/dev/fake',
+ udevaction='not_real',
+ subsystem='net'
+ )
+
+
+class TestHotplug:
+ def test_successful_add(self, mocks):
+ init = mocks.m_init
+ mocks.m_network_state.iter_interfaces.return_value = [{
+ 'mac_address': FAKE_MAC,
+ }]
+ handle_hotplug(
+ hotplug_init=init,
+ devpath='/dev/fake',
+ udevaction='add',
+ subsystem='net'
+ )
+ init.datasource.update_metadata_if_supported.assert_called_once_with([
+ EventType.HOTPLUG
+ ])
+ mocks.m_activator.bring_up_interface.assert_called_once_with('fake')
+ mocks.m_activator.bring_down_interface.assert_not_called()
+ init._write_to_cache.assert_called_once_with()
+
+ def test_successful_remove(self, mocks):
+ init = mocks.m_init
+ mocks.m_network_state.iter_interfaces.return_value = [{}]
+ handle_hotplug(
+ hotplug_init=init,
+ devpath='/dev/fake',
+ udevaction='remove',
+ subsystem='net'
+ )
+ init.datasource.update_metadata_if_supported.assert_called_once_with([
+ EventType.HOTPLUG
+ ])
+ mocks.m_activator.bring_down_interface.assert_called_once_with('fake')
+ mocks.m_activator.bring_up_interface.assert_not_called()
+ init._write_to_cache.assert_called_once_with()
+
+ def test_update_event_disabled(self, mocks, caplog):
+ init = mocks.m_init
+ init.update_event_enabled.return_value = False
+ handle_hotplug(
+ hotplug_init=init,
+ devpath='/dev/fake',
+ udevaction='remove',
+ subsystem='net'
+ )
+ assert 'hotplug not enabled for event of type' in caplog.text
+ init.datasource.update_metadata_if_supported.assert_not_called()
+ mocks.m_activator.bring_up_interface.assert_not_called()
+ mocks.m_activator.bring_down_interface.assert_not_called()
+ init._write_to_cache.assert_not_called()
+
+ def test_update_metadata_failed(self, mocks):
+ mocks.m_init.datasource.update_metadata_if_supported.return_value = \
+ False
+ with pytest.raises(
+ RuntimeError, match='Datasource .* not updated for event hotplug'
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath='/dev/fake',
+ udevaction='remove',
+ subsystem='net'
+ )
+
+ def test_detect_hotplugged_device_not_detected_on_add(self, mocks):
+ mocks.m_network_state.iter_interfaces.return_value = [{}]
+ with pytest.raises(
+ RuntimeError,
+ match='Failed to detect {} in updated metadata'.format(FAKE_MAC)
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath='/dev/fake',
+ udevaction='add',
+ subsystem='net'
+ )
+
+ def test_detect_hotplugged_device_detected_on_remove(self, mocks):
+ mocks.m_network_state.iter_interfaces.return_value = [{
+ 'mac_address': FAKE_MAC,
+ }]
+ with pytest.raises(
+ RuntimeError,
+ match='Failed to detect .* in updated metadata'
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath='/dev/fake',
+ udevaction='remove',
+ subsystem='net'
+ )
+
+ def test_apply_failed_on_add(self, mocks):
+ mocks.m_network_state.iter_interfaces.return_value = [{
+ 'mac_address': FAKE_MAC,
+ }]
+ mocks.m_activator.bring_up_interface.return_value = False
+ with pytest.raises(
+ RuntimeError, match='Failed to bring up device: /dev/fake'
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath='/dev/fake',
+ udevaction='add',
+ subsystem='net'
+ )
+
+ def test_apply_failed_on_remove(self, mocks):
+ mocks.m_network_state.iter_interfaces.return_value = [{}]
+ mocks.m_activator.bring_down_interface.return_value = False
+ with pytest.raises(
+ RuntimeError, match='Failed to bring down device: /dev/fake'
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath='/dev/fake',
+ udevaction='remove',
+ subsystem='net'
+ )
+
+ def test_retry(self, mocks):
+ with pytest.raises(RuntimeError):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath='/dev/fake',
+ udevaction='add',
+ subsystem='net'
+ )
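+ # Each failed attempt should sleep once before the next retry, with
+ # an increasing backoff between attempts.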
+ assert mocks.m_sleep.call_count == 5
+ assert mocks.m_sleep.call_args_list == [
+ call(1), call(3), call(5), call(10), call(30)
+ ]
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index c5675249..30293e9e 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -27,6 +27,8 @@ from cloudinit.handlers.upstart_job import UpstartJobPartHandler
from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE)
+INSTANCE_DATA_FILE = 'instance-data-sensitive.json'
+
class TestUpstartJobPartHandler(FilesystemMockingTestCase):
@@ -145,8 +147,8 @@ class TestJinjaTemplatePartHandler(CiTestCase):
script_handler = ShellScriptPartHandler(self.paths)
self.assertEqual(2, script_handler.handler_version)
- # Create required instance-data.json file
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
+ # Create required instance data json file
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
instance_data = {'topkey': 'echo himom'}
util.write_file(instance_json, util.json_dumps(instance_data))
h = JinjaTemplatePartHandler(
@@ -168,7 +170,7 @@ class TestJinjaTemplatePartHandler(CiTestCase):
self.assertEqual(3, cloudcfg_handler.handler_version)
# Create required instance-data.json file
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
instance_data = {'topkey': {'sub': 'runcmd: [echo hi]'}}
util.write_file(instance_json, util.json_dumps(instance_data))
h = JinjaTemplatePartHandler(
@@ -198,8 +200,9 @@ class TestJinjaTemplatePartHandler(CiTestCase):
script_file = os.path.join(script_handler.script_dir, 'part01')
self.assertEqual(
'Cannot render jinja template vars. Instance data not yet present'
- ' at {}/instance-data.json'.format(
- self.run_dir), str(context_manager.exception))
+ ' at {}/{}'.format(self.run_dir, INSTANCE_DATA_FILE),
+ str(context_manager.exception)
+ )
self.assertFalse(
os.path.exists(script_file),
'Unexpected file created %s' % script_file)
@@ -207,7 +210,8 @@ class TestJinjaTemplatePartHandler(CiTestCase):
def test_jinja_template_handle_errors_on_unreadable_instance_data(self):
"""If instance-data is unreadable, raise an error from handle_part."""
script_handler = ShellScriptPartHandler(self.paths)
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
+ instance_json = os.path.join(
+ self.run_dir, INSTANCE_DATA_FILE)
util.write_file(instance_json, util.json_dumps({}))
h = JinjaTemplatePartHandler(
self.paths, sub_handlers=[script_handler])
@@ -221,8 +225,8 @@ class TestJinjaTemplatePartHandler(CiTestCase):
frequency='freq', headers='headers')
script_file = os.path.join(script_handler.script_dir, 'part01')
self.assertEqual(
- 'Cannot render jinja template vars. No read permission on'
- " '{rdir}/instance-data.json'. Try sudo".format(rdir=self.run_dir),
+ "Cannot render jinja template vars. No read permission on "
+ "'{}/{}'. Try sudo".format(self.run_dir, INSTANCE_DATA_FILE),
str(context_manager.exception))
self.assertFalse(
os.path.exists(script_file),
@@ -230,9 +234,9 @@ class TestJinjaTemplatePartHandler(CiTestCase):
@skipUnlessJinja()
def test_jinja_template_handle_renders_jinja_content(self):
- """When present, render jinja variables from instance-data.json."""
+ """When present, render jinja variables from instance data"""
script_handler = ShellScriptPartHandler(self.paths)
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
instance_data = {'topkey': {'subkey': 'echo himom'}}
util.write_file(instance_json, util.json_dumps(instance_data))
h = JinjaTemplatePartHandler(
@@ -247,8 +251,8 @@ class TestJinjaTemplatePartHandler(CiTestCase):
frequency='freq', headers='headers')
script_file = os.path.join(script_handler.script_dir, 'part01')
self.assertNotIn(
- 'Instance data not yet present at {}/instance-data.json'.format(
- self.run_dir),
+ 'Instance data not yet present at {}/{}'.format(
+ self.run_dir, INSTANCE_DATA_FILE),
self.logs.getvalue())
self.assertEqual(
'#!/bin/bash\necho himom', util.load_file(script_file))
@@ -257,7 +261,7 @@ class TestJinjaTemplatePartHandler(CiTestCase):
def test_jinja_template_handle_renders_jinja_content_missing_keys(self):
"""When specified jinja variable is undefined, log a warning."""
script_handler = ShellScriptPartHandler(self.paths)
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
instance_data = {'topkey': {'subkey': 'echo himom'}}
util.write_file(instance_json, util.json_dumps(instance_data))
h = JinjaTemplatePartHandler(
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index f5cf514d..a39e1d0c 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -224,7 +224,9 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
self._call_main(['cloud-init', 'devel', 'schema', '--docs', 'all'])
expected_doc_sections = [
'**Supported distros:** all',
- '**Supported distros:** almalinux, alpine, centos, debian, fedora',
+ ('**Supported distros:** almalinux, alpine, centos, debian, '
+ 'eurolinux, fedora, opensuse, photon, rhel, rocky, sles, ubuntu, '
+ 'virtuozzo'),
'**Config schema**:\n **resize_rootfs:** (true/false/noblock)',
'**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n'
]
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 742d1faa..851cf82e 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -635,15 +635,20 @@ scbus-1 on xpt0 bus 0
def _get_ds(self, data, agent_command=None, distro='ubuntu',
apply_network=None, instance_id=None):
- def dsdevs():
- return data.get('dsdevs', [])
-
def _wait_for_files(flist, _maxwait=None, _naplen=None):
data['waited'] = flist
return []
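+ # Stand-in for dsaz.list_possible_azure_ds: candidates are yielded in
+ # probe order - seed dir, the default provisioning ISO device, any
+ # test-provided devices, then the cache dir.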
+ def _load_possible_azure_ds(seed_dir, cache_dir):
+ yield seed_dir
+ yield dsaz.DEFAULT_PROVISIONING_ISO_DEV
+ yield from data.get('dsdevs', [])
+ if cache_dir:
+ yield cache_dir
+
+ seed_dir = os.path.join(self.paths.seed_dir, "azure")
if data.get('ovfcontent') is not None:
- populate_dir(os.path.join(self.paths.seed_dir, "azure"),
+ populate_dir(seed_dir,
{'ovf-env.xml': data['ovfcontent']})
dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
@@ -654,6 +659,8 @@ scbus-1 on xpt0 bus 0
self.m_report_failure_to_fabric = mock.MagicMock(autospec=True)
self.m_ephemeral_dhcpv4 = mock.MagicMock()
self.m_ephemeral_dhcpv4_with_reporting = mock.MagicMock()
+ self.m_list_possible_azure_ds = mock.MagicMock(
+ side_effect=_load_possible_azure_ds)
if instance_id:
self.instance_id = instance_id
@@ -667,7 +674,8 @@ scbus-1 on xpt0 bus 0
return '7783-7084-3265-9085-8269-3286-77'
self.apply_patches([
- (dsaz, 'list_possible_azure_ds_devs', dsdevs),
+ (dsaz, 'list_possible_azure_ds',
+ self.m_list_possible_azure_ds),
(dsaz, 'perform_hostname_bounce', mock.MagicMock()),
(dsaz, 'get_hostname', mock.MagicMock()),
(dsaz, 'set_hostname', mock.MagicMock()),
@@ -844,9 +852,14 @@ scbus-1 on xpt0 bus 0
"""When a device path is used, present that in subplatform."""
data = {'sys_cfg': {}, 'dsdevs': ['/dev/cd0']}
dsrc = self._get_ds(data)
+ # DSAzure will attempt to mount /dev/sr0 first, which should
+ # fail with mount error since the list of devices doesn't have
+ # /dev/sr0
with mock.patch(MOCKPATH + 'util.mount_cb') as m_mount_cb:
- m_mount_cb.return_value = (
- {'local-hostname': 'me'}, 'ud', {'cfg': ''}, {})
+ m_mount_cb.side_effect = [
+ MountFailedError("fail"),
+ ({'local-hostname': 'me'}, 'ud', {'cfg': ''}, {})
+ ]
self.assertTrue(dsrc.get_data())
self.assertEqual(dsrc.userdata_raw, 'ud')
self.assertEqual(dsrc.metadata['local-hostname'], 'me')
@@ -1608,12 +1621,19 @@ scbus-1 on xpt0 bus 0
@mock.patch(MOCKPATH + 'util.is_FreeBSD')
@mock.patch(MOCKPATH + '_check_freebsd_cdrom')
- def test_list_possible_azure_ds_devs(self, m_check_fbsd_cdrom,
- m_is_FreeBSD):
+ def test_list_possible_azure_ds(self, m_check_fbsd_cdrom,
+ m_is_FreeBSD):
"""On FreeBSD, possible devs should show /dev/cd0."""
m_is_FreeBSD.return_value = True
m_check_fbsd_cdrom.return_value = True
- self.assertEqual(dsaz.list_possible_azure_ds_devs(), ['/dev/cd0'])
+ possible_ds = []
+ for src in dsaz.list_possible_azure_ds(
+ "seed_dir", "cache_dir"):
+ possible_ds.append(src)
+ self.assertEqual(possible_ds, ["seed_dir",
+ dsaz.DEFAULT_PROVISIONING_ISO_DEV,
+ "/dev/cd0",
+ "cache_dir"])
self.assertEqual(
[mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list)
@@ -1967,11 +1987,19 @@ class TestAzureBounce(CiTestCase):
with_logs = True
def mock_out_azure_moving_parts(self):
+
+ def _load_possible_azure_ds(seed_dir, cache_dir):
+ yield seed_dir
+ yield dsaz.DEFAULT_PROVISIONING_ISO_DEV
+ if cache_dir:
+ yield cache_dir
+
self.patches.enter_context(
mock.patch.object(dsaz.util, 'wait_for_files'))
self.patches.enter_context(
- mock.patch.object(dsaz, 'list_possible_azure_ds_devs',
- mock.MagicMock(return_value=[])))
+ mock.patch.object(
+ dsaz, 'list_possible_azure_ds',
+ mock.MagicMock(side_effect=_load_possible_azure_ds)))
self.patches.enter_context(
mock.patch.object(dsaz, 'get_metadata_from_fabric',
mock.MagicMock(return_value={})))
@@ -2797,7 +2825,8 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
@mock.patch(MOCKPATH + 'EphemeralDHCPv4')
def test_check_if_nic_is_primary_retries_on_failures(
self, m_dhcpv4, m_imds):
- """Retry polling for network metadata on all failures except timeout"""
+ """Retry polling for network metadata on all failures except timeout
+ and network unreachable errors"""
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
lease = {
'interface': 'eth9', 'fixed-address': '192.168.2.9',
@@ -2826,8 +2855,13 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
error = url_helper.UrlError(cause=cause, code=410)
eth0Retries.append(exc_cb("No goal state.", error))
else:
- cause = requests.Timeout('Fake connection timeout')
for _ in range(0, 10):
+ # We are expected to retry for a certain period for both
+ # timeout errors and network unreachable errors.
+ if _ < 5:
+ cause = requests.Timeout('Fake connection timeout')
+ else:
+ cause = requests.ConnectionError('Network Unreachable')
error = url_helper.UrlError(cause=cause)
eth1Retries.append(exc_cb("Connection timeout", error))
# Should stop retrying after 10 retries
@@ -2873,6 +2907,35 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
dsa.wait_for_link_up("eth0")
self.assertEqual(1, m_is_link_up.call_count)
+ @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
+ @mock.patch(MOCKPATH + 'util.write_file')
+ @mock.patch('cloudinit.net.read_sys_net')
+ @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
+ def test_wait_for_link_up_checks_link_after_sleep(
+ self, m_try_set_link_up, m_read_sys_net, m_writefile, m_is_up):
+ """Waiting for link to be up should return immediately if the link is
+ already up."""
+
+ distro_cls = distros.fetch('ubuntu')
+ distro = distro_cls('ubuntu', {}, self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+ m_try_set_link_up.return_value = False
+
+ callcount = 0
+
+ def is_up_mock(key):
+ nonlocal callcount
+ if callcount == 0:
+ callcount += 1
+ return False
+ return True
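+ # The first poll reports the link down and every later poll reports
+ # it up, so wait_for_link_up must sleep and check a second time.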
+
+ m_is_up.side_effect = is_up_mock
+
+ dsa.wait_for_link_up("eth0")
+ self.assertEqual(2, m_try_set_link_up.call_count)
+ self.assertEqual(2, m_is_up.call_count)
+
@mock.patch(MOCKPATH + 'util.write_file')
@mock.patch('cloudinit.net.read_sys_net')
@mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
@@ -3163,8 +3226,8 @@ class TestRemoveUbuntuNetworkConfigScripts(CiTestCase):
expected_logs = [
'INFO: Removing Ubuntu extended network scripts because cloud-init'
- ' updates Azure network configuration on the following event:'
- ' System boot.',
+ ' updates Azure network configuration on the following events:'
+ " ['boot', 'boot-legacy']",
'Recursively deleting %s' % subdir,
'Attempting to remove %s' % file1]
for log in expected_logs:
diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py
index 5e9c547a..00f0a78c 100644
--- a/tests/unittests/test_datasource/test_common.py
+++ b/tests/unittests/test_datasource/test_common.py
@@ -29,6 +29,7 @@ from cloudinit.sources import (
DataSourceSmartOS as SmartOS,
DataSourceUpCloud as UpCloud,
DataSourceVultr as Vultr,
+ DataSourceVMware as VMware,
)
from cloudinit.sources import DataSourceNone as DSNone
@@ -52,6 +53,7 @@ DEFAULT_LOCAL = [
RbxCloud.DataSourceRbxCloud,
Scaleway.DataSourceScaleway,
UpCloud.DataSourceUpCloudLocal,
+ VMware.DataSourceVMware,
]
DEFAULT_NETWORK = [
@@ -68,6 +70,7 @@ DEFAULT_NETWORK = [
OpenStack.DataSourceOpenStack,
OVF.DataSourceOVFNet,
UpCloud.DataSourceUpCloud,
+ VMware.DataSourceVMware,
]
diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py
index dce01f5d..9f52b504 100644
--- a/tests/unittests/test_datasource/test_ovf.py
+++ b/tests/unittests/test_datasource/test_ovf.py
@@ -83,6 +83,103 @@ class TestReadOvfEnv(CiTestCase):
self.assertEqual({'password': "passw0rd"}, cfg)
self.assertIsNone(ud)
+ def test_with_b64_network_config_enable_read_network(self):
+ network_config = dedent("""\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - eng.vmware.com
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """)
+ network_config_b64 = base64.b64encode(network_config.encode()).decode()
+ props = {"network-config": network_config_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001"}
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env, True)
+ self.assertEqual("inst-001", md["instance-id"])
+ self.assertEqual({'password': "passw0rd"}, cfg)
+ self.assertEqual(
+ {'version': 2, 'ethernets':
+ {'nics':
+ {'nameservers':
+ {'addresses': ['127.0.0.53'],
+ 'search': ['eng.vmware.com', 'vmware.com']},
+ 'match': {'name': 'eth*'},
+ 'gateway4': '10.10.10.253',
+ 'dhcp4': False,
+ 'addresses': ['10.10.10.1/24']}}},
+ md["network-config"])
+ self.assertIsNone(ud)
+
+ def test_with_non_b64_network_config_enable_read_network(self):
+ network_config = dedent("""\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - eng.vmware.com
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """)
+ props = {"network-config": network_config,
+ "password": "passw0rd",
+ "instance-id": "inst-001"}
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env, True)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual({'password': "passw0rd"}, cfg)
+ self.assertIsNone(ud)
+
+ def test_with_b64_network_config_disable_read_network(self):
+ network_config = dedent("""\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - eng.vmware.com
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """)
+ network_config_b64 = base64.b64encode(network_config.encode()).decode()
+ props = {"network-config": network_config_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001"}
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual({'password': "passw0rd"}, cfg)
+ self.assertIsNone(ud)
+
class TestMarkerFiles(CiTestCase):
@@ -138,18 +235,17 @@ class TestDatasourceOVF(CiTestCase):
self.assertIn(
'DEBUG: No system-product-name found', self.logs.getvalue())
- def test_get_data_no_vmware_customization_disabled(self):
- """When cloud-init workflow for vmware is disabled via sys_cfg and
- no meta data provided, log a message.
+ def test_get_data_vmware_customization_disabled(self):
+ """When vmware customization is disabled via sys_cfg and
+ allow_raw_data is disabled via ds_cfg, log a message.
"""
paths = Paths({'cloud_dir': self.tdir})
ds = self.datasource(
- sys_cfg={'disable_vmware_customization': True}, distro={},
- paths=paths)
+ sys_cfg={'disable_vmware_customization': True,
+ 'datasource': {'OVF': {'allow_raw_data': False}}},
+ distro={}, paths=paths)
conf_file = self.tmp_path('test-cust', self.tdir)
conf_content = dedent("""\
- [CUSTOM-SCRIPT]
- SCRIPT-NAME = test-script
[MISC]
MARKER-ID = 12345345
""")
@@ -168,7 +264,71 @@ class TestDatasourceOVF(CiTestCase):
'DEBUG: Customization for VMware platform is disabled.',
self.logs.getvalue())
- def test_get_data_vmware_customization_disabled(self):
+ def test_get_data_vmware_customization_sys_cfg_disabled(self):
+ """When vmware customization is disabled via sys_cfg and
+ no meta data is found, log a message.
+ """
+ paths = Paths({'cloud_dir': self.tdir})
+ ds = self.datasource(
+ sys_cfg={'disable_vmware_customization': True,
+ 'datasource': {'OVF': {'allow_raw_data': True}}},
+ distro={}, paths=paths)
+ conf_file = self.tmp_path('test-cust', self.tdir)
+ conf_content = dedent("""\
+ [MISC]
+ MARKER-ID = 12345345
+ """)
+ util.write_file(conf_file, conf_content)
+ retcode = wrap_and_call(
+ 'cloudinit.sources.DataSourceOVF',
+ {'dmi.read_dmi_data': 'vmware',
+ 'transport_iso9660': NOT_FOUND,
+ 'transport_vmware_guestinfo': NOT_FOUND,
+ 'util.del_dir': True,
+ 'search_file': self.tdir,
+ 'wait_for_imc_cfg_file': conf_file},
+ ds.get_data)
+ self.assertFalse(retcode, 'Expected False return from ds.get_data')
+ self.assertIn(
+ 'DEBUG: Customization using VMware config is disabled.',
+ self.logs.getvalue())
+
+ def test_get_data_allow_raw_data_disabled(self):
+ """When allow_raw_data is disabled via ds_cfg and
+ meta data is found, log a message.
+ """
+ paths = Paths({'cloud_dir': self.tdir})
+ ds = self.datasource(
+ sys_cfg={'disable_vmware_customization': False,
+ 'datasource': {'OVF': {'allow_raw_data': False}}},
+ distro={}, paths=paths)
+
+ # Prepare the conf file
+ conf_file = self.tmp_path('test-cust', self.tdir)
+ conf_content = dedent("""\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """)
+ util.write_file(conf_file, conf_content)
+ # Prepare the meta data file
+ metadata_file = self.tmp_path('test-meta', self.tdir)
+ util.write_file(metadata_file, "This is meta data")
+ retcode = wrap_and_call(
+ 'cloudinit.sources.DataSourceOVF',
+ {'dmi.read_dmi_data': 'vmware',
+ 'transport_iso9660': NOT_FOUND,
+ 'transport_vmware_guestinfo': NOT_FOUND,
+ 'util.del_dir': True,
+ 'search_file': self.tdir,
+ 'wait_for_imc_cfg_file': conf_file,
+ 'collect_imc_file_paths': [self.tdir + '/test-meta', '', '']},
+ ds.get_data)
+ self.assertFalse(retcode, 'Expected False return from ds.get_data')
+ self.assertIn(
+ 'DEBUG: Customization using raw data is disabled.',
+ self.logs.getvalue())
+
+ def test_get_data_vmware_customization_enabled(self):
"""When cloud-init workflow for vmware is enabled via sys_cfg log a
message.
"""
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 5847a384..9c499672 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -29,7 +29,7 @@ from cloudinit.sources.DataSourceSmartOS import (
convert_smartos_network_data as convert_net,
SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ,
identify_file)
-from cloudinit.event import EventType
+from cloudinit.event import EventScope, EventType
from cloudinit import helpers as c_helpers
from cloudinit.util import (b64e, write_file)
@@ -653,8 +653,12 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
def test_reconfig_network_on_boot(self):
# Test to ensure that network is configured from metadata on each boot
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- self.assertSetEqual(set([EventType.BOOT_NEW_INSTANCE, EventType.BOOT]),
- dsrc.update_events['network'])
+ self.assertSetEqual(
+ {EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY},
+ dsrc.default_update_events[EventScope.NETWORK]
+ )
class TestIdentifyFile(CiTestCase):
diff --git a/tests/unittests/test_datasource/test_vmware.py b/tests/unittests/test_datasource/test_vmware.py
new file mode 100644
index 00000000..52f910b5
--- /dev/null
+++ b/tests/unittests/test_datasource/test_vmware.py
@@ -0,0 +1,391 @@
+# Copyright (c) 2021 VMware, Inc. All Rights Reserved.
+#
+# Authors: Andrew Kutz <akutz@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import base64
+import gzip
+import os
+
+import pytest
+
+from cloudinit import dmi, helpers, safeyaml
+from cloudinit import settings
+from cloudinit.sources import DataSourceVMware
+from cloudinit.tests.helpers import (
+ mock,
+ CiTestCase,
+ FilesystemMockingTestCase,
+ populate_dir,
+)
+
+
+PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name"
+PRODUCT_NAME = "VMware7,1"
+PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB"
+REROOT_FILES = {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ PRODUCT_NAME_FILE_PATH: PRODUCT_NAME,
+}
+
+VMW_MULTIPLE_KEYS = [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@vmw.com",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@vmw.com",
+]
+VMW_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@vmw.com"
+
+VMW_METADATA_YAML = """instance-id: cloud-vm
+local-hostname: cloud-vm
+network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+"""
+
+VMW_USERDATA_YAML = """## template: jinja
+#cloud-config
+users:
+- default
+"""
+
+VMW_VENDORDATA_YAML = """## template: jinja
+#cloud-config
+runcmd:
+- echo "Hello, world."
+"""
+
+
+@pytest.yield_fixture(autouse=True)
+def common_patches():
+ with mock.patch('cloudinit.util.platform.platform', return_value='Linux'):
+ with mock.patch.multiple(
+ 'cloudinit.dmi',
+ is_container=mock.Mock(return_value=False),
+ is_FreeBSD=mock.Mock(return_value=False)
+ ):
+ yield
+
+
+class TestDataSourceVMware(CiTestCase):
+ """
+ Test common functionality that is not transport specific.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMware, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_no_data_access_method(self):
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = None
+ ret = ds.get_data()
+ self.assertFalse(ret)
+
+ def test_get_host_info(self):
+ host_info = DataSourceVMware.get_host_info()
+ self.assertTrue(host_info)
+ self.assertTrue(host_info["hostname"])
+ self.assertTrue(host_info["local-hostname"])
+ self.assertTrue(host_info["local_hostname"])
+ self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4])
+
+
+class TestDataSourceVMwareEnvVars(FilesystemMockingTestCase):
+ """
+ Test the envvar transport.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMwareEnvVars, self).setUp()
+ self.tmp = self.tmp_dir()
+ os.environ[DataSourceVMware.VMX_GUESTINFO] = "1"
+ self.create_system_files()
+
+ def tearDown(self):
+ del os.environ[DataSourceVMware.VMX_GUESTINFO]
+ return super(TestDataSourceVMwareEnvVars, self).tearDown()
+
+ def create_system_files(self):
+ rootd = self.tmp_dir()
+ populate_dir(
+ rootd,
+ {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ },
+ )
+ self.assertTrue(self.reRoot(rootd))
+
+ def assert_get_data_ok(self, m_fn, m_fn_call_count=6):
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = None
+ ret = ds.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(m_fn_call_count, m_fn.call_count)
+ self.assertEqual(
+ ds.data_access_method, DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR
+ )
+ return ds
+
+ def assert_metadata(self, metadata, m_fn, m_fn_call_count=6):
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count)
+ assert_metadata(self, ds, metadata)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_subplatform(self, m_fn):
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+ self.assertEqual(
+ ds.subplatform,
+ "%s (%s)"
+ % (
+ DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR,
+ DataSourceVMware.get_guestinfo_envvar_key_name("metadata"),
+ ),
+ )
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_only(self, m_fn):
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_userdata_only(self, m_fn):
+ m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_vendordata_only(self, m_fn):
+ m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_base64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_b64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_gzip_base64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gzip+base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_gz_b64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gz+b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_metadata_single_ssh_key(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_SINGLE_KEY
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_metadata_multiple_ssh_keys(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_MULTIPLE_KEYS
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+
+class TestDataSourceVMwareGuestInfo(FilesystemMockingTestCase):
+ """
+ Test the guestinfo transport on a VMware platform.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMwareGuestInfo, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.create_system_files()
+
+ def create_system_files(self):
+ rootd = self.tmp_dir()
+ populate_dir(
+ rootd,
+ {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ PRODUCT_NAME_FILE_PATH: PRODUCT_NAME,
+ },
+ )
+ self.assertTrue(self.reRoot(rootd))
+
+ def assert_get_data_ok(self, m_fn, m_fn_call_count=6):
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = "vmware-rpctool"
+ ret = ds.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(m_fn_call_count, m_fn.call_count)
+ self.assertEqual(
+ ds.data_access_method,
+ DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO,
+ )
+ return ds
+
+ def assert_metadata(self, metadata, m_fn, m_fn_call_count=6):
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count)
+ assert_metadata(self, ds, metadata)
+
+ def test_ds_valid_on_vmware_platform(self):
+ system_type = dmi.read_dmi_data("system-product-name")
+ self.assertEqual(system_type, PRODUCT_NAME)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_subplatform(self, m_fn):
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+ self.assertEqual(
+ ds.subplatform,
+ "%s (%s)"
+ % (
+ DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO,
+ DataSourceVMware.get_guestinfo_key_name("metadata"),
+ ),
+ )
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_userdata_only(self, m_fn):
+ m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_vendordata_only(self, m_fn):
+ m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_metadata_single_ssh_key(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_SINGLE_KEY
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_metadata_multiple_ssh_keys(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_MULTIPLE_KEYS
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_base64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_b64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_gzip_base64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gzip+base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_gz_b64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gz+b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+
+class TestDataSourceVMwareGuestInfo_InvalidPlatform(FilesystemMockingTestCase):
+ """
+ Test the guestinfo transport on a non-VMware platform.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMwareGuestInfo_InvalidPlatform, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.create_system_files()
+
+ def create_system_files(self):
+ rootd = self.tmp_dir()
+ populate_dir(
+ rootd,
+ {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ },
+ )
+ self.assertTrue(self.reRoot(rootd))
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_ds_invalid_on_non_vmware_platform(self, m_fn):
+ system_type = dmi.read_dmi_data("system-product-name")
+ self.assertEqual(system_type, None)
+
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = "vmware-rpctool"
+ ret = ds.get_data()
+ self.assertFalse(ret)
+
+
+def assert_metadata(test_obj, ds, metadata):
+ test_obj.assertEqual(metadata.get("instance-id"), ds.get_instance_id())
+ test_obj.assertEqual(metadata.get("local-hostname"), ds.get_hostname())
+
+ expected_public_keys = metadata.get("public_keys")
+ if not isinstance(expected_public_keys, list):
+ expected_public_keys = [expected_public_keys]
+
+ test_obj.assertEqual(expected_public_keys, ds.get_public_ssh_keys())
+ test_obj.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+
+def get_ds(temp_dir):
+ ds = DataSourceVMware.DataSourceVMware(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": temp_dir})
+ )
+ ds.vmware_rpctool = "vmware-rpctool"
+ return ds
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py
index 94ab052d..021866b7 100644
--- a/tests/unittests/test_distros/test_create_users.py
+++ b/tests/unittests/test_distros/test_create_users.py
@@ -23,7 +23,7 @@ class MyBaseDistro(distros.Distro):
def _write_network(self, settings):
raise NotImplementedError()
- def package_command(self, cmd, args=None, pkgs=None):
+ def package_command(self, command, args=None, pkgs=None):
raise NotImplementedError()
def update_package_sources(self):
diff --git a/tests/unittests/test_distros/test_dragonflybsd.py b/tests/unittests/test_distros/test_dragonflybsd.py
new file mode 100644
index 00000000..df2c00f4
--- /dev/null
+++ b/tests/unittests/test_distros/test_dragonflybsd.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python3
+
+
+import cloudinit.util
+from cloudinit.tests.helpers import mock
+
+
+def test_find_dragonflybsd_part():
+ assert cloudinit.util.find_dragonflybsd_part("/dev/vbd0s3") == "vbd0s3"
+
+
+@mock.patch("cloudinit.util.is_DragonFlyBSD")
+@mock.patch("cloudinit.subp.subp")
+def test_parse_mount(mock_subp, m_is_DragonFlyBSD):
+ mount_out = """
+vbd0s3 on / (hammer2, local)
+devfs on /dev (devfs, nosymfollow, local)
+/dev/vbd0s0a on /boot (ufs, local)
+procfs on /proc (procfs, local)
+tmpfs on /var/run/shm (tmpfs, local)
+"""
+
+ mock_subp.return_value = (mount_out, "")
+ m_is_DragonFlyBSD.return_value = True
+ assert cloudinit.util.parse_mount("/") == ("vbd0s3", "hammer2", "/")
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index a1df066a..d09e46af 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -2,6 +2,7 @@
import copy
import os
+import re
from io import StringIO
from textwrap import dedent
from unittest import mock
@@ -14,7 +15,7 @@ from cloudinit.tests.helpers import (
FilesystemMockingTestCase, dir2dict)
from cloudinit import subp
from cloudinit import util
-
+from cloudinit import safeyaml
BASE_NET_CFG = '''
auto lo
@@ -88,6 +89,24 @@ V1_NET_CFG = {'config': [{'name': 'eth0',
'type': 'physical'}],
'version': 1}
+V1_NET_CFG_WITH_DUPS = """\
+# same value in interface specific dns and global dns
+# should produce single entry in network file
+version: 1
+config:
+ - type: physical
+ name: eth0
+ subnets:
+ - type: static
+ address: 192.168.0.102/24
+ dns_nameservers: [1.2.3.4]
+ dns_search: [test.com]
+ interface: eth0
+ - type: nameserver
+ address: [1.2.3.4]
+ search: [test.com]
+"""
+
V1_NET_CFG_OUTPUT = """\
# This file is generated from information provided by the datasource. Changes
# to it will not persist across an instance reboot. To disable cloud-init's
@@ -771,6 +790,125 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase):
with_netplan=True)
+class TestNetCfgDistroPhoton(TestNetCfgDistroBase):
+
+ def setUp(self):
+ super(TestNetCfgDistroPhoton, self).setUp()
+ self.distro = self._get_distro('photon', renderers=['networkd'])
+
+ def create_conf_dict(self, contents):
+ content_dict = {}
+ key = None
+ for line in contents:
+ if line:
+ line = line.strip()
+ if line and re.search(r'^\[(.+)\]$', line):
+ content_dict[line] = []
+ key = line
+ elif line:
+ assert key
+ content_dict[key].append(line)
+
+ return content_dict
+
+ def compare_dicts(self, actual, expected):
+ for k, v in actual.items():
+ self.assertEqual(sorted(expected[k]), sorted(v))
+
+ def _apply_and_verify(self, apply_fn, config, expected_cfgs=None,
+ bringup=False):
+ if not expected_cfgs:
+ raise ValueError('expected_cfgs must not be None')
+
+ tmpd = None
+ with mock.patch('cloudinit.net.networkd.available') as m_avail:
+ m_avail.return_value = True
+ with self.reRooted(tmpd) as tmpd:
+ apply_fn(config, bringup)
+
+ results = dir2dict(tmpd)
+ for cfgpath, expected in expected_cfgs.items():
+ actual = self.create_conf_dict(results[cfgpath].splitlines())
+ self.compare_dicts(actual, expected)
+ self.assertEqual(0o644, get_mode(cfgpath, tmpd))
+
+ def nwk_file_path(self, ifname):
+ return '/etc/systemd/network/10-cloud-init-%s.network' % ifname
+
+ def net_cfg_1(self, ifname):
+ ret = """\
+ [Match]
+ Name=%s
+ [Network]
+ DHCP=no
+ [Address]
+ Address=192.168.1.5/24
+ [Route]
+ Gateway=192.168.1.254""" % ifname
+ return ret
+
+ def net_cfg_2(self, ifname):
+ ret = """\
+ [Match]
+ Name=%s
+ [Network]
+ DHCP=ipv4""" % ifname
+ return ret
+
+ def test_photon_network_config_v1(self):
+ tmp = self.net_cfg_1('eth0').splitlines()
+ expected_eth0 = self.create_conf_dict(tmp)
+
+ tmp = self.net_cfg_2('eth1').splitlines()
+ expected_eth1 = self.create_conf_dict(tmp)
+
+ expected_cfgs = {
+ self.nwk_file_path('eth0'): expected_eth0,
+ self.nwk_file_path('eth1'): expected_eth1,
+ }
+
+ self._apply_and_verify(self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs.copy())
+
+ def test_photon_network_config_v2(self):
+ tmp = self.net_cfg_1('eth7').splitlines()
+ expected_eth7 = self.create_conf_dict(tmp)
+
+ tmp = self.net_cfg_2('eth9').splitlines()
+ expected_eth9 = self.create_conf_dict(tmp)
+
+ expected_cfgs = {
+ self.nwk_file_path('eth7'): expected_eth7,
+ self.nwk_file_path('eth9'): expected_eth9,
+ }
+
+ self._apply_and_verify(self.distro.apply_network_config,
+ V2_NET_CFG,
+ expected_cfgs.copy())
+
+ def test_photon_network_config_v1_with_duplicates(self):
+ expected = """\
+ [Match]
+ Name=eth0
+ [Network]
+ DHCP=no
+ DNS=1.2.3.4
+ Domains=test.com
+ [Address]
+ Address=192.168.0.102/24"""
+
+ net_cfg = safeyaml.load(V1_NET_CFG_WITH_DUPS)
+
+ expected = self.create_conf_dict(expected.splitlines())
+ expected_cfgs = {
+ self.nwk_file_path('eth0'): expected,
+ }
+
+ self._apply_and_verify(self.distro.apply_network_config,
+ net_cfg,
+ expected_cfgs.copy())
+
+
def get_mode(path, target=None):
return os.stat(subp.target_path(target, path)).st_mode & 0o777
diff --git a/tests/unittests/test_distros/test_photon.py b/tests/unittests/test_distros/test_photon.py
new file mode 100644
index 00000000..1c3145ca
--- /dev/null
+++ b/tests/unittests/test_distros/test_photon.py
@@ -0,0 +1,68 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from . import _get_distro
+from cloudinit import util
+from cloudinit.tests.helpers import mock
+from cloudinit.tests.helpers import CiTestCase
+
+SYSTEM_INFO = {
+ 'paths': {
+ 'cloud_dir': '/var/lib/cloud/',
+ 'templates_dir': '/etc/cloud/templates/',
+ },
+ 'network': {'renderers': 'networkd'},
+}
+
+
+class TestPhoton(CiTestCase):
+ with_logs = True
+ distro = _get_distro('photon', SYSTEM_INFO)
+ expected_log_line = 'Rely on PhotonOS default network config'
+
+ def test_network_renderer(self):
+ self.assertEqual(self.distro._cfg['network']['renderers'], 'networkd')
+
+ def test_get_distro(self):
+ self.assertEqual(self.distro.osfamily, 'photon')
+
+ @mock.patch("cloudinit.distros.photon.subp.subp")
+ def test_write_hostname(self, m_subp):
+ hostname = 'myhostname'
+ hostfile = self.tmp_path('previous-hostname')
+ self.distro._write_hostname(hostname, hostfile)
+ self.assertEqual(hostname, util.load_file(hostfile))
+
+ ret = self.distro._read_hostname(hostfile)
+ self.assertEqual(ret, hostname)
+
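+ # A path other than the previous-hostname file exercises the
+ # hostnamectl fallback (subp is mocked).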
+ m_subp.return_value = (None, None)
+ hostfile += 'hostfile'
+ self.distro._write_hostname(hostname, hostfile)
+
+ m_subp.return_value = (hostname, None)
+ ret = self.distro._read_hostname(hostfile)
+ self.assertEqual(ret, hostname)
+
+ self.logs.truncate(0)
+ m_subp.return_value = (None, 'bla')
+ self.distro._write_hostname(hostname, None)
+ self.assertIn('Error while setting hostname', self.logs.getvalue())
+
+ @mock.patch('cloudinit.net.generate_fallback_config')
+ def test_fallback_netcfg(self, m_fallback_cfg):
+
+ key = 'disable_fallback_netcfg'
+ # Don't use fallback if no setting given
+ self.logs.truncate(0)
+ assert self.distro.generate_fallback_config() is None
+ self.assertIn(self.expected_log_line, self.logs.getvalue())
+
+ self.logs.truncate(0)
+ self.distro._cfg[key] = True
+ assert self.distro.generate_fallback_config() is None
+ self.assertIn(self.expected_log_line, self.logs.getvalue())
+
+ self.logs.truncate(0)
+ self.distro._cfg[key] = False
+ assert self.distro.generate_fallback_config() is not None
+ self.assertNotIn(self.expected_log_line, self.logs.getvalue())
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 1d8aaf18..8617d7bd 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -649,6 +649,50 @@ class TestDsIdentify(DsIdentifyBase):
"""EC2: bobrightbox.com in product_serial is not brightbox'"""
self._test_ds_not_found('Ec2-E24Cloud-negative')
+ def test_vmware_no_valid_transports(self):
+ """VMware: no valid transports"""
+ self._test_ds_not_found('VMware-NoValidTransports')
+
+ def test_vmware_envvar_no_data(self):
+ """VMware: envvar transport no data"""
+ self._test_ds_not_found('VMware-EnvVar-NoData')
+
+ def test_vmware_envvar_no_virt_id(self):
+ """VMware: envvar transport success if no virt id"""
+ self._test_ds_found('VMware-EnvVar-NoVirtID')
+
+ def test_vmware_envvar_activated_by_metadata(self):
+ """VMware: envvar transport activated by metadata"""
+ self._test_ds_found('VMware-EnvVar-Metadata')
+
+ def test_vmware_envvar_activated_by_userdata(self):
+ """VMware: envvar transport activated by userdata"""
+ self._test_ds_found('VMware-EnvVar-Userdata')
+
+ def test_vmware_envvar_activated_by_vendordata(self):
+ """VMware: envvar transport activated by vendordata"""
+ self._test_ds_found('VMware-EnvVar-Vendordata')
+
+ def test_vmware_guestinfo_no_data(self):
+ """VMware: guestinfo transport no data"""
+ self._test_ds_not_found('VMware-GuestInfo-NoData')
+
+ def test_vmware_guestinfo_no_virt_id(self):
+ """VMware: guestinfo transport fails if no virt id"""
+ self._test_ds_not_found('VMware-GuestInfo-NoVirtID')
+
+ def test_vmware_guestinfo_activated_by_metadata(self):
+ """VMware: guestinfo transport activated by metadata"""
+ self._test_ds_found('VMware-GuestInfo-Metadata')
+
+ def test_vmware_guestinfo_activated_by_userdata(self):
+ """VMware: guestinfo transport activated by userdata"""
+ self._test_ds_found('VMware-GuestInfo-Userdata')
+
+ def test_vmware_guestinfo_activated_by_vendordata(self):
+ """VMware: guestinfo transport activated by vendordata"""
+ self._test_ds_found('VMware-GuestInfo-Vendordata')
+
class TestBSDNoSys(DsIdentifyBase):
"""Test *BSD code paths
@@ -1136,7 +1180,240 @@ VALID_CFG = {
'Ec2-E24Cloud-negative': {
'ds': 'Ec2',
'files': {P_SYS_VENDOR: 'e24cloudyday\n'},
- }
+ },
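+ # In the VMware entries below, 'ret' is the mocked shell helper's
+ # exit status (0 == success) and 'out' is its stdout.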
+ 'VMware-NoValidTransports': {
+ 'ds': 'VMware',
+ 'mocks': [
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ 'VMware-EnvVar-NoData': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo',
+ 'ret': 0,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata',
+ 'ret': 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ 'VMware-EnvVar-NoVirtID': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo',
+ 'ret': 0,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata',
+ 'ret': 0,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata',
+ 'ret': 1,
+ },
+ ],
+ },
+ 'VMware-EnvVar-Metadata': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo',
+ 'ret': 0,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata',
+ 'ret': 0,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata',
+ 'ret': 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ 'VMware-EnvVar-Userdata': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo',
+ 'ret': 0,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata',
+ 'ret': 0,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata',
+ 'ret': 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ 'VMware-EnvVar-Vendordata': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo',
+ 'ret': 0,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata',
+ 'ret': 0,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ 'VMware-GuestInfo-NoData': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_rpctool',
+ 'ret': 0,
+ 'out': '/usr/bin/vmware-rpctool',
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_metadata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_userdata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_vendordata',
+ 'ret': 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ 'VMware-GuestInfo-NoVirtID': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_rpctool',
+ 'ret': 0,
+ 'out': '/usr/bin/vmware-rpctool',
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_metadata',
+ 'ret': 0,
+ 'out': '---',
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_userdata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_vendordata',
+ 'ret': 1,
+ },
+ ],
+ },
+ 'VMware-GuestInfo-Metadata': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_rpctool',
+ 'ret': 0,
+ 'out': '/usr/bin/vmware-rpctool',
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_metadata',
+ 'ret': 0,
+ 'out': '---',
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_userdata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_vendordata',
+ 'ret': 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ 'VMware-GuestInfo-Userdata': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_rpctool',
+ 'ret': 0,
+ 'out': '/usr/bin/vmware-rpctool',
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_metadata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_userdata',
+ 'ret': 0,
+ 'out': '---',
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_vendordata',
+ 'ret': 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ 'VMware-GuestInfo-Vendordata': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_rpctool',
+ 'ret': 0,
+ 'out': '/usr/bin/vmware-rpctool',
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_metadata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_userdata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_vendordata',
+ 'ret': 0,
+ 'out': '---',
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
}
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py
index ac847238..abb0a9b6 100644
--- a/tests/unittests/test_handler/test_handler_apt_source_v3.py
+++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py
@@ -1009,6 +1009,29 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
self.assertEqual(mirrors['SECURITY'],
smir)
+ def test_apt_v3_add_mirror_keys(self):
+ """test_apt_v3_add_mirror_keys - Test adding key for mirrors"""
+ arch = 'amd64'
+ cfg = {
+ 'primary': [
+ {'arches': [arch],
+ 'uri': 'http://test.ubuntu.com/',
+ 'key': 'fakekey_primary'}],
+ 'security': [
+ {'arches': [arch],
+ 'uri': 'http://testsec.ubuntu.com/',
+ 'key': 'fakekey_security'}]
+ }
+
+ with mock.patch.object(cc_apt_configure,
+ 'add_apt_key_raw') as mockadd:
+ cc_apt_configure.add_mirror_keys(cfg, TARGET)
+ calls = [
+ mock.call('fakekey_primary', TARGET),
+ mock.call('fakekey_security', TARGET),
+ ]
+ mockadd.assert_has_calls(calls, any_order=True)
+
class TestDebconfSelections(TestCase):
diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py
index e87069f6..69e8b30d 100644
--- a/tests/unittests/test_handler/test_handler_mounts.py
+++ b/tests/unittests/test_handler/test_handler_mounts.py
@@ -133,6 +133,15 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
disk_path,
cc_mounts.sanitize_devname(disk_path, None, mock.Mock()))
+ def test_device_aliases_remapping(self):
+ disk_path = '/dev/sda'
+ self.mock_existence_of_disk(disk_path)
+ self.assertEqual(disk_path,
+ cc_mounts.sanitize_devname('mydata',
+ lambda x: None,
+ mock.Mock(),
+ {'mydata': disk_path}))
+
class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase):
diff --git a/tests/unittests/test_handler/test_handler_puppet.py b/tests/unittests/test_handler/test_handler_puppet.py
index 62388ac6..b7891ab4 100644
--- a/tests/unittests/test_handler/test_handler_puppet.py
+++ b/tests/unittests/test_handler/test_handler_puppet.py
@@ -3,7 +3,7 @@
from cloudinit.config import cc_puppet
from cloudinit.sources import DataSourceNone
from cloudinit import (distros, helpers, cloud, util)
-from cloudinit.tests.helpers import CiTestCase, mock
+from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase, mock
import logging
import textwrap
@@ -63,7 +63,8 @@ class TestPuppetHandle(CiTestCase):
super(TestPuppetHandle, self).setUp()
self.new_root = self.tmp_dir()
self.conf = self.tmp_path('puppet.conf')
- self.csr_attributes_path = self.tmp_path('csr_attributes.yaml')
+ self.csr_attributes_path = self.tmp_path(
+ 'csr_attributes.yaml')
def _get_cloud(self, distro):
paths = helpers.Paths({'templates_dir': self.new_root})
@@ -72,7 +73,7 @@ class TestPuppetHandle(CiTestCase):
myds = DataSourceNone.DataSourceNone({}, mydist, paths)
return cloud.Cloud(myds, paths, {}, mydist, None)
- def test_handler_skips_missing_puppet_key_in_cloudconfig(self, m_auto):
+ def test_skips_missing_puppet_key_in_cloudconfig(self, m_auto):
"""Cloud-config containing no 'puppet' key is skipped."""
mycloud = self._get_cloud('ubuntu')
cfg = {}
@@ -81,19 +82,19 @@ class TestPuppetHandle(CiTestCase):
"no 'puppet' configuration found", self.logs.getvalue())
self.assertEqual(0, m_auto.call_count)
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_config_starts_puppet_service(self, m_subp, m_auto):
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_starts_puppet_service(self, m_subp, m_auto):
"""Cloud-config 'puppet' configuration starts puppet."""
mycloud = self._get_cloud('ubuntu')
cfg = {'puppet': {'install': False}}
cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
self.assertEqual(1, m_auto.call_count)
- self.assertEqual(
+ self.assertIn(
[mock.call(['service', 'puppet', 'start'], capture=False)],
m_subp.call_args_list)
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_empty_puppet_config_installs_puppet(self, m_subp, m_auto):
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_empty_puppet_config_installs_puppet(self, m_subp, m_auto):
"""Cloud-config empty 'puppet' configuration installs latest puppet."""
mycloud = self._get_cloud('ubuntu')
mycloud.distro = mock.MagicMock()
@@ -103,8 +104,8 @@ class TestPuppetHandle(CiTestCase):
[mock.call(('puppet', None))],
mycloud.distro.install_packages.call_args_list)
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_config_installs_puppet_on_true(self, m_subp, _):
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_installs_puppet_on_true(self, m_subp, _):
"""Cloud-config with 'puppet' key installs when 'install' is True."""
mycloud = self._get_cloud('ubuntu')
mycloud.distro = mock.MagicMock()
@@ -114,8 +115,85 @@ class TestPuppetHandle(CiTestCase):
[mock.call(('puppet', None))],
mycloud.distro.install_packages.call_args_list)
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_config_installs_puppet_version(self, m_subp, _):
+ @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio(self, m_subp, m_aio, _):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio'."""
+ mycloud = self._get_cloud('ubuntu')
+ mycloud.distro = mock.MagicMock()
+ cfg = {'puppet': {'install': True, 'install_type': 'aio'}}
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ m_aio.assert_called_with(
+ cc_puppet.AIO_INSTALL_URL,
+ None, None, True)
+
+ @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio_with_version(self,
+ m_subp, m_aio, _):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio' and 'version' is specified."""
+ mycloud = self._get_cloud('ubuntu')
+ mycloud.distro = mock.MagicMock()
+ cfg = {'puppet': {'install': True,
+ 'version': '6.24.0', 'install_type': 'aio'}}
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ m_aio.assert_called_with(
+ cc_puppet.AIO_INSTALL_URL,
+ '6.24.0', None, True)
+
+ @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio_with_collection(self,
+ m_subp,
+ m_aio, _):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio' and 'collection' is specified."""
+ mycloud = self._get_cloud('ubuntu')
+ mycloud.distro = mock.MagicMock()
+ cfg = {'puppet': {'install': True,
+ 'collection': 'puppet6', 'install_type': 'aio'}}
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ m_aio.assert_called_with(
+ cc_puppet.AIO_INSTALL_URL,
+ None, 'puppet6', True)
+
+ @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio_with_custom_url(self,
+ m_subp,
+ m_aio, _):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio' and 'aio_install_url' is specified."""
+ mycloud = self._get_cloud('ubuntu')
+ mycloud.distro = mock.MagicMock()
+ cfg = {'puppet':
+ {'install': True,
+ 'aio_install_url': 'http://test.url/path/to/script.sh',
+ 'install_type': 'aio'}}
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ m_aio.assert_called_with(
+ 'http://test.url/path/to/script.sh', None, None, True)
+
+ @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio_without_cleanup(self,
+ m_subp,
+ m_aio, _):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio' and no cleanup."""
+ mycloud = self._get_cloud('ubuntu')
+ mycloud.distro = mock.MagicMock()
+ cfg = {'puppet': {'install': True,
+ 'cleanup': False, 'install_type': 'aio'}}
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ m_aio.assert_called_with(
+ cc_puppet.AIO_INSTALL_URL,
+ None, None, False)
+
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_installs_puppet_version(self, m_subp, _):
"""Cloud-config 'puppet' configuration can specify a version."""
mycloud = self._get_cloud('ubuntu')
mycloud.distro = mock.MagicMock()
@@ -125,26 +203,39 @@ class TestPuppetHandle(CiTestCase):
[mock.call(('puppet', '3.8'))],
mycloud.distro.install_packages.call_args_list)
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_config_updates_puppet_conf(self, m_subp, m_auto):
+ @mock.patch('cloudinit.config.cc_puppet.get_config_value')
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_updates_puppet_conf(self,
+ m_subp, m_default, m_auto):
"""When 'conf' is provided update values in PUPPET_CONF_PATH."""
+
+ def _fake_get_config_value(puppet_bin, setting):
+ return self.conf
+
+ m_default.side_effect = _fake_get_config_value
mycloud = self._get_cloud('ubuntu')
cfg = {
'puppet': {
- 'conf': {'agent': {'server': 'puppetmaster.example.org'}}}}
- util.write_file(self.conf, '[agent]\nserver = origpuppet\nother = 3')
- puppet_conf_path = 'cloudinit.config.cc_puppet.PUPPET_CONF_PATH'
+ 'conf': {'agent': {'server': 'puppetserver.example.org'}}}}
+ util.write_file(
+ self.conf, '[agent]\nserver = origpuppet\nother = 3')
mycloud.distro = mock.MagicMock()
- with mock.patch(puppet_conf_path, self.conf):
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
content = util.load_file(self.conf)
- expected = '[agent]\nserver = puppetmaster.example.org\nother = 3\n\n'
+ expected = '[agent]\nserver = puppetserver.example.org\nother = 3\n\n'
self.assertEqual(expected, content)
+ @mock.patch('cloudinit.config.cc_puppet.get_config_value')
@mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_writes_csr_attributes_file(self, m_subp, m_auto):
+ def test_puppet_writes_csr_attributes_file(self,
+ m_subp, m_default, m_auto):
"""When csr_attributes is provided
creates file in PUPPET_CSR_ATTRIBUTES_PATH."""
+
+ def _fake_get_config_value(puppet_bin, setting):
+ return self.csr_attributes_path
+
+ m_default.side_effect = _fake_get_config_value
mycloud = self._get_cloud('ubuntu')
mycloud.distro = mock.MagicMock()
cfg = {
@@ -163,10 +254,7 @@ class TestPuppetHandle(CiTestCase):
}
}
}
- csr_attributes = 'cloudinit.config.cc_puppet.' \
- 'PUPPET_CSR_ATTRIBUTES_PATH'
- with mock.patch(csr_attributes, self.csr_attributes_path):
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
content = util.load_file(self.csr_attributes_path)
expected = textwrap.dedent("""\
custom_attributes:
@@ -177,3 +265,101 @@ class TestPuppetHandle(CiTestCase):
pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E
""")
self.assertEqual(expected, content)
+
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_runs_puppet_if_requested(self, m_subp, m_auto):
+ """Run puppet with default args if 'exec' is set to True."""
+ mycloud = self._get_cloud('ubuntu')
+ cfg = {'puppet': {'exec': True}}
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ self.assertEqual(1, m_auto.call_count)
+ self.assertIn(
+ [mock.call(['puppet', 'agent', '--test'], capture=False)],
+ m_subp.call_args_list)
+
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_runs_puppet_with_args_list_if_requested(self,
+ m_subp, m_auto):
+ """Run puppet with 'exec_args' list if 'exec' is set to True."""
+ mycloud = self._get_cloud('ubuntu')
+ cfg = {'puppet': {'exec': True, 'exec_args': [
+ '--onetime', '--detailed-exitcodes']}}
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ self.assertEqual(1, m_auto.call_count)
+ self.assertIn(
+ [mock.call(
+ ['puppet', 'agent', '--onetime', '--detailed-exitcodes'],
+ capture=False)],
+ m_subp.call_args_list)
+
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_runs_puppet_with_args_string_if_requested(self,
+ m_subp, m_auto):
+ """Run puppet with 'exec_args' string if 'exec' is set to True."""
+ mycloud = self._get_cloud('ubuntu')
+ cfg = {'puppet': {'exec': True,
+ 'exec_args': '--onetime --detailed-exitcodes'}}
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ self.assertEqual(1, m_auto.call_count)
+ self.assertIn(
+ [mock.call(
+ ['puppet', 'agent', '--onetime', '--detailed-exitcodes'],
+ capture=False)],
+ m_subp.call_args_list)
+
+
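+# Canned readurl() response standing in for the downloaded AIO
+# install script.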
+URL_MOCK = mock.Mock()
+URL_MOCK.contents = b'#!/bin/bash\necho "Hi Mom"'
+
+
+@mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=(None, None))
+@mock.patch(
+ 'cloudinit.config.cc_puppet.url_helper.readurl',
+ return_value=URL_MOCK, autospec=True,
+)
+class TestInstallPuppetAio(HttprettyTestCase):
+ def test_install_with_default_arguments(self, m_readurl, m_subp):
+ """Install AIO with no arguments"""
+ cc_puppet.install_puppet_aio()
+
+ self.assertEqual(
+ [mock.call([mock.ANY, '--cleanup'], capture=False)],
+ m_subp.call_args_list)
+
+ def test_install_with_custom_url(self, m_readurl, m_subp):
+ """Install AIO from custom URL"""
+ cc_puppet.install_puppet_aio('http://custom.url/path/to/script.sh')
+ m_readurl.assert_called_with(
+ url='http://custom.url/path/to/script.sh',
+ retries=5)
+
+ self.assertEqual(
+ [mock.call([mock.ANY, '--cleanup'], capture=False)],
+ m_subp.call_args_list)
+
+ def test_install_with_version(self, m_readurl, m_subp):
+ """Install AIO with specific version"""
+ cc_puppet.install_puppet_aio(cc_puppet.AIO_INSTALL_URL, '7.6.0')
+
+ self.assertEqual(
+ [mock.call([mock.ANY, '-v', '7.6.0', '--cleanup'], capture=False)],
+ m_subp.call_args_list)
+
+ def test_install_with_collection(self, m_readurl, m_subp):
+ """Install AIO with specific collection"""
+ cc_puppet.install_puppet_aio(
+ cc_puppet.AIO_INSTALL_URL, None, 'puppet6-nightly')
+
+ self.assertEqual(
+ [mock.call([mock.ANY, '-c', 'puppet6-nightly', '--cleanup'],
+ capture=False)],
+ m_subp.call_args_list)
+
+ def test_install_with_no_cleanup(self, m_readurl, m_subp):
+ """Install AIO with no cleanup"""
+ cc_puppet.install_puppet_aio(
+ cc_puppet.AIO_INSTALL_URL, None, None, False)
+
+ self.assertEqual(
+ [mock.call([mock.ANY], capture=False)],
+ m_subp.call_args_list)
diff --git a/tests/unittests/test_handler/test_handler_resolv_conf.py b/tests/unittests/test_handler/test_handler_resolv_conf.py
new file mode 100644
index 00000000..96139001
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_resolv_conf.py
@@ -0,0 +1,105 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.config import cc_resolv_conf
+
+from cloudinit import cloud
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import util
+from copy import deepcopy
+
+from cloudinit.tests import helpers as t_help
+
+import logging
+import os
+import shutil
+import tempfile
+from unittest import mock
+
+LOG = logging.getLogger(__name__)
+
+
+class TestResolvConf(t_help.FilesystemMockingTestCase):
+ with_logs = True
+ cfg = {'manage_resolv_conf': True, 'resolv_conf': {}}
+
+ def setUp(self):
+ super(TestResolvConf, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ util.ensure_dir(os.path.join(self.tmp, 'data'))
+ self.addCleanup(shutil.rmtree, self.tmp)
+
+ def _fetch_distro(self, kind, conf=None):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({'cloud_dir': self.tmp})
+ conf = {} if conf is None else conf
+ return cls(kind, conf, paths)
+
+ def call_resolv_conf_handler(self, distro_name, conf, cc=None):
+ if not cc:
+ ds = None
+ distro = self._fetch_distro(distro_name, conf)
+ paths = helpers.Paths({'cloud_dir': self.tmp})
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ cc_resolv_conf.handle('cc_resolv_conf', conf, cc, LOG, [])
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_systemd_resolved(self, m_render_to_file):
+ self.call_resolv_conf_handler('photon', self.cfg)
+
+ assert [
+ mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_no_param(self, m_render_to_file):
+ tmp = deepcopy(self.cfg)
+ self.logs.truncate(0)
+ tmp.pop('resolv_conf')
+ self.call_resolv_conf_handler('photon', tmp)
+
+ self.assertIn('manage_resolv_conf True but no parameters provided',
+ self.logs.getvalue())
+ assert [
+ mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY)
+ ] not in m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_manage_resolv_conf_false(self, m_render_to_file):
+ tmp = deepcopy(self.cfg)
+ self.logs.truncate(0)
+ tmp['manage_resolv_conf'] = False
+ self.call_resolv_conf_handler('photon', tmp)
+ self.assertIn("'manage_resolv_conf' present but set to False",
+ self.logs.getvalue())
+ assert [
+ mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY)
+ ] not in m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_etc_resolv_conf(self, m_render_to_file):
+ self.call_resolv_conf_handler('rhel', self.cfg)
+
+ assert [
+ mock.call(mock.ANY, '/etc/resolv.conf', mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_invalid_resolve_conf_fn(self, m_render_to_file):
+ ds = None
+ distro = self._fetch_distro('rhel', self.cfg)
+ paths = helpers.Paths({'cloud_dir': self.tmp})
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ cc.distro.resolve_conf_fn = 'bla'
+
+ self.logs.truncate(0)
+ self.call_resolv_conf_handler('rhel', self.cfg, cc)
+
+ self.assertIn('No template found, not rendering resolve configs',
+ self.logs.getvalue())
+
+ assert [
+ mock.call(mock.ANY, '/etc/resolv.conf', mock.ANY)
+ ] not in m_render_to_file.call_args_list
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py
index 73641b70..1a524c7d 100644
--- a/tests/unittests/test_handler/test_handler_set_hostname.py
+++ b/tests/unittests/test_handler/test_handler_set_hostname.py
@@ -120,6 +120,46 @@ class TestHostname(t_help.FilesystemMockingTestCase):
contents = util.load_file(distro.hostname_conf_fn)
self.assertEqual('blah', contents.strip())
+ @mock.patch('cloudinit.distros.photon.subp.subp')
+ def test_photon_hostname(self, m_subp):
+ cfg1 = {
+ 'hostname': 'photon',
+ 'prefer_fqdn_over_hostname': True,
+ 'fqdn': 'test1.vmware.com',
+ }
+ cfg2 = {
+ 'hostname': 'photon',
+ 'prefer_fqdn_over_hostname': False,
+ 'fqdn': 'test2.vmware.com',
+ }
+
+ ds = None
+ m_subp.return_value = (None, None)
+ distro = self._fetch_distro('photon', cfg1)
+ paths = helpers.Paths({'cloud_dir': self.tmp})
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ for c in [cfg1, cfg2]:
+ cc_set_hostname.handle('cc_set_hostname', c, cc, LOG, [])
+ print("\n", m_subp.call_args_list)
+ if c['prefer_fqdn_over_hostname']:
+ assert [
+ mock.call(['hostnamectl', 'set-hostname', c['fqdn']],
+ capture=True)
+ ] in m_subp.call_args_list
+ assert [
+ mock.call(['hostnamectl', 'set-hostname', c['hostname']],
+ capture=True)
+ ] not in m_subp.call_args_list
+ else:
+ assert [
+ mock.call(['hostnamectl', 'set-hostname', c['hostname']],
+ capture=True)
+ ] in m_subp.call_args_list
+ assert [
+ mock.call(['hostnamectl', 'set-hostname', c['fqdn']],
+ capture=True)
+ ] not in m_subp.call_args_list
+
def test_multiple_calls_skips_unchanged_hostname(self):
"""Only new hostname or fqdn values will generate a hostname call."""
distro = self._fetch_distro('debian')
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index b72a62b8..fc77b11e 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -5,7 +5,7 @@ from cloudinit import distros
from cloudinit.net import cmdline
from cloudinit.net import (
eni, interface_has_own_mac, natural_sort_key, netplan, network_state,
- renderers, sysconfig)
+ renderers, sysconfig, networkd)
from cloudinit.sources.helpers import openstack
from cloudinit import temp_utils
from cloudinit import subp
@@ -821,6 +821,32 @@ iface eth1 inet static
NETWORK_CONFIGS = {
'small': {
+ 'expected_networkd_eth99': textwrap.dedent("""\
+ [Match]
+ Name=eth99
+ MACAddress=c0:d6:9f:2c:e8:80
+ [Address]
+ Address=192.168.21.3/24
+ [Network]
+ DHCP=ipv4
+ Domains=barley.maas sach.maas
+ Domains=wark.maas
+ DNS=1.2.3.4 5.6.7.8
+ DNS=8.8.8.8 8.8.4.4
+ [Route]
+ Gateway=65.61.151.37
+ Destination=0.0.0.0/0
+ Metric=10000
+ """).rstrip(' '),
+ 'expected_networkd_eth1': textwrap.dedent("""\
+ [Match]
+ Name=eth1
+ MACAddress=cf:d6:af:48:e8:80
+ [Network]
+ DHCP=no
+ Domains=wark.maas
+ DNS=1.2.3.4 5.6.7.8
+ """).rstrip(' '),
'expected_eni': textwrap.dedent("""\
auto lo
iface lo inet loopback
@@ -938,6 +964,12 @@ NETWORK_CONFIGS = {
"""),
},
'v4_and_v6': {
+ 'expected_networkd': textwrap.dedent("""\
+ [Match]
+ Name=iface0
+ [Network]
+ DHCP=yes
+ """).rstrip(' '),
'expected_eni': textwrap.dedent("""\
auto lo
iface lo inet loopback
@@ -973,6 +1005,17 @@ NETWORK_CONFIGS = {
""").rstrip(' '),
},
'v4_and_v6_static': {
+ 'expected_networkd': textwrap.dedent("""\
+ [Match]
+ Name=iface0
+ [Link]
+ MTUBytes=8999
+ [Network]
+ DHCP=no
+ [Address]
+ Address=192.168.14.2/24
+ Address=2001:1::1/64
+ """).rstrip(' '),
'expected_eni': textwrap.dedent("""\
auto lo
iface lo inet loopback
@@ -1059,6 +1102,12 @@ NETWORK_CONFIGS = {
""").rstrip(' '),
},
'dhcpv6_only': {
+ 'expected_networkd': textwrap.dedent("""\
+ [Match]
+ Name=iface0
+ [Network]
+ DHCP=ipv6
+ """).rstrip(' '),
'expected_eni': textwrap.dedent("""\
auto lo
iface lo inet loopback
@@ -4986,26 +5035,199 @@ class TestEniRoundTrip(CiTestCase):
files['/etc/network/interfaces'].splitlines())
+class TestNetworkdNetRendering(CiTestCase):
+
+ def create_conf_dict(self, contents):
+ content_dict = {}
+ for line in contents:
+ if line:
+ line = line.strip()
+ if line and re.search(r'^\[(.+)\]$', line):
+ content_dict[line] = []
+ key = line
+ elif line:
+ assert key
+ content_dict[key].append(line)
+
+ return content_dict
+
+ def compare_dicts(self, actual, expected):
+ for k, v in actual.items():
+ self.assertEqual(sorted(expected[k]), sorted(v))
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ @mock.patch("cloudinit.net.read_sys_net")
+ @mock.patch("cloudinit.net.get_devicelist")
+ def test_networkd_default_generation(self, mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ m_get_cmdline,
+ m_chown):
+ tmp_dir = self.tmp_dir()
+ _setup_test(tmp_dir, mock_get_devicelist,
+ mock_read_sys_net, mock_sys_dev_path)
+
+ network_cfg = net.generate_fallback_config()
+ ns = network_state.parse_net_config_data(network_cfg,
+ skip_broken=False)
+
+ render_dir = os.path.join(tmp_dir, "render")
+ os.makedirs(render_dir)
+
+ render_target = 'etc/systemd/network/10-cloud-init-eth1000.network'
+ renderer = networkd.Renderer({})
+ renderer.render_network_state(ns, target=render_dir)
+
+ self.assertTrue(os.path.exists(os.path.join(render_dir,
+ render_target)))
+ with open(os.path.join(render_dir, render_target)) as fh:
+ contents = fh.readlines()
+
+ actual = self.create_conf_dict(contents)
+
+ expected = textwrap.dedent("""\
+ [Match]
+ Name=eth1000
+ MACAddress=07-1c-c6-75-a4-be
+ [Network]
+ DHCP=ipv4""").rstrip(' ')
+
+ expected = self.create_conf_dict(expected.splitlines())
+
+ self.compare_dicts(actual, expected)
+
+
+class TestNetworkdRoundTrip(CiTestCase):
+
+ def create_conf_dict(self, contents):
+ content_dict = {}
+ for line in contents:
+ if line:
+ line = line.strip()
+ if line and re.search(r'^\[(.+)\]$', line):
+ content_dict[line] = []
+ key = line
+ elif line:
+ assert key
+ content_dict[key].append(line)
+
+ return content_dict
+
+ def compare_dicts(self, actual, expected):
+ for k, v in actual.items():
+ self.assertEqual(sorted(expected[k]), sorted(v))
+
+ def _render_and_read(self, network_config=None, state=None, nwkd_path=None,
+ dir=None):
+ if dir is None:
+ dir = self.tmp_dir()
+
+ if network_config:
+ ns = network_state.parse_net_config_data(network_config)
+ elif state:
+ ns = state
+ else:
+ raise ValueError("Expected data or state, got neither")
+
+ if not nwkd_path:
+ nwkd_path = '/etc/systemd/network/'
+
+ renderer = networkd.Renderer(config={'network_conf_dir': nwkd_path})
+
+ renderer.render_network_state(ns, target=dir)
+ return dir2dict(dir)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def testsimple_render_small_networkd(self, m_chown):
+ nwk_fn1 = '/etc/systemd/network/10-cloud-init-eth99.network'
+ nwk_fn2 = '/etc/systemd/network/10-cloud-init-eth1.network'
+ entry = NETWORK_CONFIGS['small']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+
+ actual = files[nwk_fn1].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry['expected_networkd_eth99'].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ actual = files[nwk_fn2].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry['expected_networkd_eth1'].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def testsimple_render_v4_and_v6(self, m_chown):
+ nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network'
+ entry = NETWORK_CONFIGS['v4_and_v6']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry['expected_networkd'].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def testsimple_render_v4_and_v6_static(self, m_chown):
+ nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network'
+ entry = NETWORK_CONFIGS['v4_and_v6_static']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry['expected_networkd'].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def testsimple_render_dhcpv6_only(self, m_chown):
+ nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network'
+ entry = NETWORK_CONFIGS['dhcpv6_only']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry['expected_networkd'].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+
class TestRenderersSelect:
@pytest.mark.parametrize(
- 'renderer_selected,netplan,eni,nm,scfg,sys', (
+ 'renderer_selected,netplan,eni,nm,scfg,sys,networkd', (
# -netplan -ifupdown -nm -scfg -sys raises error
- (net.RendererNotFoundError, False, False, False, False, False),
+ (net.RendererNotFoundError, False, False, False, False, False,
+ False),
# -netplan +ifupdown -nm -scfg -sys selects eni
- ('eni', False, True, False, False, False),
+ ('eni', False, True, False, False, False, False),
# +netplan +ifupdown -nm -scfg -sys selects eni
- ('eni', True, True, False, False, False),
+ ('eni', True, True, False, False, False, False),
# +netplan -ifupdown -nm -scfg -sys selects netplan
- ('netplan', True, False, False, False, False),
+ ('netplan', True, False, False, False, False, False),
# Ubuntu with Network-Manager installed
# +netplan -ifupdown +nm -scfg -sys selects netplan
- ('netplan', True, False, True, False, False),
+ ('netplan', True, False, True, False, False, False),
# Centos/OpenSuse with Network-Manager installed selects sysconfig
+ # -netplan -ifupdown +nm -scfg +sys selects sysconfig
- ('sysconfig', False, False, True, False, True),
+ ('sysconfig', False, False, True, False, True, False),
+ # -netplan -ifupdown -nm -scfg -sys +networkd selects networkd
+ ('networkd', False, False, False, False, False, True),
),
)
+ @mock.patch("cloudinit.net.renderers.networkd.available")
@mock.patch("cloudinit.net.renderers.netplan.available")
@mock.patch("cloudinit.net.renderers.sysconfig.available")
@mock.patch("cloudinit.net.renderers.sysconfig.available_sysconfig")
@@ -5013,7 +5235,8 @@ class TestRenderersSelect:
@mock.patch("cloudinit.net.renderers.eni.available")
def test_valid_renderer_from_defaults_depending_on_availability(
self, m_eni_avail, m_nm_avail, m_scfg_avail, m_sys_avail,
- m_netplan_avail, renderer_selected, netplan, eni, nm, scfg, sys
+ m_netplan_avail, m_networkd_avail, renderer_selected,
+ netplan, eni, nm, scfg, sys, networkd
):
"""Assert proper renderer per DEFAULT_PRIORITY given availability."""
m_eni_avail.return_value = eni # ifupdown pkg presence
@@ -5021,6 +5244,7 @@ class TestRenderersSelect:
m_scfg_avail.return_value = scfg # sysconfig presence
m_sys_avail.return_value = sys # sysconfig/ifup/down presence
m_netplan_avail.return_value = netplan # netplan presence
+ m_networkd_avail.return_value = networkd # networkd presence
if isinstance(renderer_selected, str):
(renderer_name, _rnd_class) = renderers.select(
priority=renderers.DEFAULT_PRIORITY
@@ -5053,7 +5277,7 @@ class TestNetRenderers(CiTestCase):
# available should only be called until one is found.
m_eni_avail.return_value = True
m_sysc_avail.side_effect = Exception("Should not call me")
- found = renderers.search(priority=['eni', 'sysconfig'], first=True)
+ found = renderers.search(priority=['eni', 'sysconfig'], first=True)[0]
self.assertEqual(['eni'], [found[0]])
@mock.patch("cloudinit.net.renderers.sysconfig.available")
@@ -5084,6 +5308,7 @@ class TestNetRenderers(CiTestCase):
('opensuse-tumbleweed', '', ''),
('sles', '', ''),
('centos', '', ''),
+ ('eurolinux', '', ''),
('fedora', '', ''),
('redhat', '', ''),
]
@@ -5094,6 +5319,12 @@ class TestNetRenderers(CiTestCase):
result = sysconfig.available()
self.assertTrue(result)
+ @mock.patch("cloudinit.net.renderers.networkd.available")
+ def test_networkd_available(self, m_nwkd_avail):
+ m_nwkd_avail.return_value = True
+ found = renderers.search(priority=['networkd'], first=False)
+ self.assertEqual('networkd', found[0][0])
+
@mock.patch(
"cloudinit.net.is_openvswitch_internal_interface",
diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py
new file mode 100644
index 00000000..38f2edf2
--- /dev/null
+++ b/tests/unittests/test_net_activators.py
@@ -0,0 +1,255 @@
+from collections import namedtuple
+from unittest.mock import patch
+
+import pytest
+
+from cloudinit.net.activators import (
+ DEFAULT_PRIORITY,
+ search_activator,
+ select_activator,
+)
+from cloudinit.net.activators import (
+ IfUpDownActivator,
+ NetplanActivator,
+ NetworkManagerActivator,
+ NetworkdActivator
+)
+from cloudinit.net.network_state import parse_net_config_data
+from cloudinit.safeyaml import load
+
+
+V1_CONFIG = """\
+version: 1
+config:
+- type: physical
+ name: eth0
+- type: physical
+ name: eth1
+"""
+
+V2_CONFIG = """\
+version: 2
+ethernets:
+ eth0:
+ dhcp4: true
+ eth1:
+ dhcp4: true
+"""
+
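+# Netplan always applies the full rendered configuration, so both
+# bring-up and bring-down reduce to a single 'netplan apply'.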
+NETPLAN_CALL_LIST = [
+ ((['netplan', 'apply'], ), {}),
+]
+
+
+@pytest.fixture
+def available_mocks():
+ mocks = namedtuple('Mocks', 'm_which, m_file')
+ with patch('cloudinit.subp.which', return_value=True) as m_which:
+ with patch('os.path.isfile', return_value=True) as m_file:
+ yield mocks(m_which, m_file)
+
+
+@pytest.fixture
+def unavailable_mocks():
+ mocks = namedtuple('Mocks', 'm_which, m_file')
+ with patch('cloudinit.subp.which', return_value=False) as m_which:
+ with patch('os.path.isfile', return_value=False) as m_file:
+ yield mocks(m_which, m_file)
+
+
+class TestSearchAndSelect:
+ def test_defaults(self, available_mocks):
+ resp = search_activator()
+ assert resp == DEFAULT_PRIORITY
+
+ activator = select_activator()
+ assert activator == DEFAULT_PRIORITY[0]
+
+ def test_priority(self, available_mocks):
+ new_order = [NetplanActivator, NetworkManagerActivator]
+ resp = search_activator(priority=new_order)
+ assert resp == new_order
+
+ activator = select_activator(priority=new_order)
+ assert activator == new_order[0]
+
+ def test_target(self, available_mocks):
+ search_activator(target='/tmp')
+ assert '/tmp' == available_mocks.m_which.call_args[1]['target']
+
+ select_activator(target='/tmp')
+ assert '/tmp' == available_mocks.m_which.call_args[1]['target']
+
+ @patch('cloudinit.net.activators.IfUpDownActivator.available',
+ return_value=False)
+ def test_first_not_available(self, m_available, available_mocks):
+ resp = search_activator()
+ assert resp == DEFAULT_PRIORITY[1:]
+
+ resp = select_activator()
+ assert resp == DEFAULT_PRIORITY[1]
+
+ def test_priority_not_exist(self, available_mocks):
+ with pytest.raises(ValueError):
+ search_activator(priority=['spam', 'eggs'])
+ with pytest.raises(ValueError):
+ select_activator(priority=['spam', 'eggs'])
+
+ def test_none_available(self, unavailable_mocks):
+ resp = search_activator()
+ assert resp == []
+
+ with pytest.raises(RuntimeError):
+ select_activator()
+
+
+IF_UP_DOWN_AVAILABLE_CALLS = [
+ (('ifquery',), {'search': ['/sbin', '/usr/sbin'], 'target': None}),
+ (('ifup',), {'search': ['/sbin', '/usr/sbin'], 'target': None}),
+ (('ifdown',), {'search': ['/sbin', '/usr/sbin'], 'target': None}),
+]
+
+NETPLAN_AVAILABLE_CALLS = [
+ (('netplan',), {'search': ['/usr/sbin', '/sbin'], 'target': None}),
+]
+
+NETWORK_MANAGER_AVAILABLE_CALLS = [
+ (('nmcli',), {'target': None}),
+]
+
+NETWORKD_AVAILABLE_CALLS = [
+ (('ip',), {'search': ['/usr/bin', '/bin'], 'target': None}),
+ (('systemctl',), {'search': ['/usr/bin', '/bin'], 'target': None}),
+]
+
+
+@pytest.mark.parametrize('activator, available_calls', [
+ (IfUpDownActivator, IF_UP_DOWN_AVAILABLE_CALLS),
+ (NetplanActivator, NETPLAN_AVAILABLE_CALLS),
+ (NetworkManagerActivator, NETWORK_MANAGER_AVAILABLE_CALLS),
+ (NetworkdActivator, NETWORKD_AVAILABLE_CALLS),
+])
+class TestActivatorsAvailable:
+ def test_available(
+ self, activator, available_calls, available_mocks
+ ):
+ activator.available()
+ assert available_mocks.m_which.call_args_list == available_calls
+
+
+IF_UP_DOWN_BRING_UP_CALL_LIST = [
+ ((['ifup', 'eth0'], ), {}),
+ ((['ifup', 'eth1'], ), {}),
+]
+
+NETWORK_MANAGER_BRING_UP_CALL_LIST = [
+ ((['nmcli', 'connection', 'up', 'ifname', 'eth0'], ), {}),
+ ((['nmcli', 'connection', 'up', 'ifname', 'eth1'], ), {}),
+]
+
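+# The networkd activator sets each link up via 'ip' and then restarts
+# systemd-networkd/systemd-resolved to apply the rendered config.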
+NETWORKD_BRING_UP_CALL_LIST = [
+ ((['ip', 'link', 'set', 'up', 'eth0'], ), {}),
+ ((['ip', 'link', 'set', 'up', 'eth1'], ), {}),
+ ((['systemctl', 'restart', 'systemd-networkd', 'systemd-resolved'], ), {}),
+]
+
+
+@pytest.mark.parametrize('activator, expected_call_list', [
+ (IfUpDownActivator, IF_UP_DOWN_BRING_UP_CALL_LIST),
+ (NetplanActivator, NETPLAN_CALL_LIST),
+ (NetworkManagerActivator, NETWORK_MANAGER_BRING_UP_CALL_LIST),
+ (NetworkdActivator, NETWORKD_BRING_UP_CALL_LIST),
+])
+class TestActivatorsBringUp:
+ @patch('cloudinit.subp.subp', return_value=('', ''))
+ def test_bring_up_interface(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ activator.bring_up_interface('eth0')
+ assert len(m_subp.call_args_list) == 1
+ assert m_subp.call_args_list[0] == expected_call_list[0]
+
+ @patch('cloudinit.subp.subp', return_value=('', ''))
+ def test_bring_up_interfaces(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ index = 0
+ activator.bring_up_interfaces(['eth0', 'eth1'])
+ for call in m_subp.call_args_list:
+ assert call == expected_call_list[index]
+ index += 1
+
+ @patch('cloudinit.subp.subp', return_value=('', ''))
+ def test_bring_up_all_interfaces_v1(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ network_state = parse_net_config_data(load(V1_CONFIG))
+ activator.bring_up_all_interfaces(network_state)
+ for call in m_subp.call_args_list:
+ assert call in expected_call_list
+
+ @patch('cloudinit.subp.subp', return_value=('', ''))
+ def test_bring_up_all_interfaces_v2(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ network_state = parse_net_config_data(load(V2_CONFIG))
+ activator.bring_up_all_interfaces(network_state)
+ for call in m_subp.call_args_list:
+ assert call in expected_call_list
+
+
+IF_UP_DOWN_BRING_DOWN_CALL_LIST = [
+ ((['ifdown', 'eth0'], ), {}),
+ ((['ifdown', 'eth1'], ), {}),
+]
+
+NETWORK_MANAGER_BRING_DOWN_CALL_LIST = [
+ ((['nmcli', 'connection', 'down', 'eth0'], ), {}),
+ ((['nmcli', 'connection', 'down', 'eth1'], ), {}),
+]
+
+NETWORKD_BRING_DOWN_CALL_LIST = [
+ ((['ip', 'link', 'set', 'down', 'eth0'], ), {}),
+ ((['ip', 'link', 'set', 'down', 'eth1'], ), {}),
+]
+
+
+@pytest.mark.parametrize('activator, expected_call_list', [
+ (IfUpDownActivator, IF_UP_DOWN_BRING_DOWN_CALL_LIST),
+ (NetplanActivator, NETPLAN_CALL_LIST),
+ (NetworkManagerActivator, NETWORK_MANAGER_BRING_DOWN_CALL_LIST),
+ (NetworkdActivator, NETWORKD_BRING_DOWN_CALL_LIST),
+])
+class TestActivatorsBringDown:
+ @patch('cloudinit.subp.subp', return_value=('', ''))
+ def test_bring_down_interface(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ activator.bring_down_interface('eth0')
+ assert len(m_subp.call_args_list) == 1
+ assert m_subp.call_args_list[0] == expected_call_list[0]
+
+ @patch('cloudinit.subp.subp', return_value=('', ''))
+ def test_bring_down_interfaces(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ activator.bring_down_interfaces(['eth0', 'eth1'])
+ assert expected_call_list == m_subp.call_args_list
+
+ @patch('cloudinit.subp.subp', return_value=('', ''))
+ def test_bring_down_all_interfaces_v1(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ network_state = parse_net_config_data(load(V1_CONFIG))
+ activator.bring_down_all_interfaces(network_state)
+ for call in m_subp.call_args_list:
+ assert call in expected_call_list
+
+ @patch('cloudinit.subp.subp', return_value=('', ''))
+ def test_bring_down_all_interfaces_v2(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ network_state = parse_net_config_data(load(V2_CONFIG))
+ activator.bring_down_all_interfaces(network_state)
+ for call in m_subp.call_args_list:
+ assert call in expected_call_list
diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py
index 414b4830..466d472b 100644
--- a/tests/unittests/test_net_freebsd.py
+++ b/tests/unittests/test_net_freebsd.py
@@ -1,8 +1,24 @@
-from cloudinit import net
+import os
+import yaml
+
+import cloudinit.net
+import cloudinit.net.network_state
+from cloudinit.tests.helpers import (CiTestCase, mock, readResource, dir2dict)
-from cloudinit.tests.helpers import (CiTestCase, mock, readResource)
SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output")
+V1 = """
+config:
+- id: eno1
+ mac_address: 08:94:ef:51:ae:e0
+ mtu: 1470
+ name: eno1
+ subnets:
+ - address: 172.20.80.129/25
+ type: static
+ type: physical
+version: 1
+"""
class TestInterfacesByMac(CiTestCase):
@@ -12,8 +28,49 @@ class TestInterfacesByMac(CiTestCase):
def test_get_interfaces_by_mac(self, mock_is_FreeBSD, mock_subp):
mock_is_FreeBSD.return_value = True
mock_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, 0)
- a = net.get_interfaces_by_mac()
+ a = cloudinit.net.get_interfaces_by_mac()
assert a == {'52:54:00:50:b7:0d': 'vtnet0',
'80:00:73:63:5c:48': 're0.33',
'02:14:39:0e:25:00': 'bridge0',
'02:ff:60:8c:f3:72': 'vnet0:11'}
+
+
+class TestFreeBSDRoundTrip(CiTestCase):
+
+ def _render_and_read(self, network_config=None, state=None,
+ netplan_path=None, target=None):
+ if target is None:
+ target = self.tmp_dir()
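+ # Seed dummy config files that the FreeBSD renderer edits in place.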
+ os.mkdir("%s/etc" % target)
+ with open("%s/etc/rc.conf" % target, 'a') as fd:
+ fd.write("# dummy rc.conf\n")
+ with open("%s/etc/resolv.conf" % target, 'a') as fd:
+ fd.write("# dummy resolv.conf\n")
+
+ if network_config:
+ ns = cloudinit.net.network_state.parse_net_config_data(
+ network_config)
+ elif state:
+ ns = state
+ else:
+ raise ValueError("Expected data or state, got neither")
+
+ renderer = cloudinit.net.freebsd.Renderer()
+ renderer.render_network_state(ns, target=target)
+ return dir2dict(target)
+
+ @mock.patch('cloudinit.subp.subp')
+ def test_render_output_has_yaml(self, mock_subp):
+
+ entry = {
+ 'yaml': V1,
+ }
+ network_config = yaml.load(entry['yaml'])
+ ns = cloudinit.net.network_state.parse_net_config_data(network_config)
+ files = self._render_and_read(state=ns)
+ assert files == {
+ '/etc/resolv.conf': '# dummy resolv.conf\n',
+ '/etc/rc.conf': (
+ "# dummy rc.conf\n"
+ "ifconfig_eno1="
+ "'172.20.80.129 netmask 255.255.255.128 mtu 1470'\n")}
diff --git a/tests/unittests/test_render_cloudcfg.py b/tests/unittests/test_render_cloudcfg.py
index 495e2669..00d50e66 100644
--- a/tests/unittests/test_render_cloudcfg.py
+++ b/tests/unittests/test_render_cloudcfg.py
@@ -9,8 +9,9 @@ from cloudinit import subp
from cloudinit import util
# TODO(Look to align with tools.render-cloudcfg or cloudinit.distos.OSFAMILIES)
-DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd",
- "netbsd", "openbsd", "rhel", "suse", "ubuntu", "unknown"]
+DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "eurolinux", "fedora",
+ "freebsd", "netbsd", "openbsd", "photon", "rhel", "suse",
+ "ubuntu", "unknown"]
@pytest.mark.allow_subp_for(sys.executable)
diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py
index 9f11fd5c..b78a6939 100644
--- a/tests/unittests/test_reporting.py
+++ b/tests/unittests/test_reporting.py
@@ -113,6 +113,7 @@ class TestReportingEvent(TestCase):
class TestFinishReportingEvent(TestCase):
+
def test_as_has_result(self):
result = events.status.SUCCESS
name, desc = 'test_name', 'test_desc'
@@ -121,6 +122,23 @@ class TestFinishReportingEvent(TestCase):
self.assertTrue('result' in ret)
self.assertEqual(ret['result'], result)
+ def test_has_result_with_optional_post_files(self):
+ result = events.status.SUCCESS
+ name, desc, files = 'test_name', 'test_desc', [
+ '/really/fake/path/install.log']
+ event = events.FinishReportingEvent(
+ name, desc, result, post_files=files)
+ ret = event.as_dict()
+ self.assertTrue('result' in ret)
+ self.assertTrue('files' in ret)
+ self.assertEqual(ret['result'], result)
+ posted_install_log = ret['files'][0]
+ self.assertTrue('path' in posted_install_log)
+ self.assertTrue('content' in posted_install_log)
+ self.assertTrue('encoding' in posted_install_log)
+ self.assertEqual(posted_install_log['path'], files[0])
+ self.assertEqual(posted_install_log['encoding'], 'base64')
+
class TestBaseReportingHandler(TestCase):
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index fd1d1bac..a66788bf 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -1,6 +1,9 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import os
+
from collections import namedtuple
+from functools import partial
from unittest.mock import patch
from cloudinit import ssh_util
@@ -8,13 +11,48 @@ from cloudinit.tests import helpers as test_helpers
from cloudinit import util
# https://stackoverflow.com/questions/11351032/
-FakePwEnt = namedtuple(
- 'FakePwEnt',
- ['pw_dir', 'pw_gecos', 'pw_name', 'pw_passwd', 'pw_shell', 'pwd_uid'])
+FakePwEnt = namedtuple('FakePwEnt', [
+ 'pw_name',
+ 'pw_passwd',
+ 'pw_uid',
+ 'pw_gid',
+ 'pw_gecos',
+ 'pw_dir',
+ 'pw_shell',
+])
FakePwEnt.__new__.__defaults__ = tuple(
"UNSET_%s" % n for n in FakePwEnt._fields)
+def mock_get_owner(updated_permissions, value):
+ try:
+ return updated_permissions[value][0]
+ except KeyError:
+ return util.get_owner(value)
+
+
+def mock_get_group(updated_permissions, value):
+ try:
+ return updated_permissions[value][1]
+ except KeyError:
+ return util.get_group(value)
+
+
+def mock_get_user_groups(username):
+ return username
+
+
+def mock_get_permissions(updated_permissions, value):
+ try:
+ return updated_permissions[value][2]
+ except KeyError:
+ return util.get_permissions(value)
+
+
+def mock_getpwnam(users, username):
+ return users[username]
+
+
# Do not use these public keys, most of them are fetched from
# the testdata for OpenSSH, and their private keys are available
# https://github.com/openssh/openssh-portable/tree/master/regress/unittests/sshkey/testdata
@@ -552,12 +590,30 @@ class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase):
ssh_util.render_authorizedkeysfile_paths(
"/opt/%u/keys", "/home/bobby", "bobby"))
+ def test_user_file(self):
+ self.assertEqual(
+ ["/opt/bobby"],
+ ssh_util.render_authorizedkeysfile_paths(
+ "/opt/%u", "/home/bobby", "bobby"))
+
+ def test_user_file2(self):
+ self.assertEqual(
+ ["/opt/bobby/bobby"],
+ ssh_util.render_authorizedkeysfile_paths(
+ "/opt/%u/%u", "/home/bobby", "bobby"))
+
def test_multiple(self):
self.assertEqual(
["/keys/path1", "/keys/path2"],
ssh_util.render_authorizedkeysfile_paths(
"/keys/path1 /keys/path2", "/home/bobby", "bobby"))
+ def test_multiple2(self):
+ self.assertEqual(
+ ["/keys/path1", "/keys/bobby"],
+ ssh_util.render_authorizedkeysfile_paths(
+ "/keys/path1 /keys/%u", "/home/bobby", "bobby"))
+
def test_relative(self):
self.assertEqual(
["/home/bobby/.secret/keys"],
@@ -570,56 +626,774 @@ class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase):
ssh_util.render_authorizedkeysfile_paths(
"%h/.keys", "/homedirs/bobby", "bobby"))
+ def test_all(self):
+ self.assertEqual(
+ ["/homedirs/bobby/.keys", "/homedirs/bobby/.secret/keys",
+ "/keys/path1", "/opt/bobby/keys"],
+ ssh_util.render_authorizedkeysfile_paths(
+ "%h/.keys .secret/keys /keys/path1 /opt/%u/keys",
+ "/homedirs/bobby", "bobby"))
-class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
-
- @patch("cloudinit.ssh_util.pwd.getpwnam")
- def test_multiple_authorizedkeys_file_order1(self, m_getpwnam):
- fpw = FakePwEnt(pw_name='bobby', pw_dir='/home2/bobby')
- m_getpwnam.return_value = fpw
- authorized_keys = self.tmp_path('authorized_keys')
- util.write_file(authorized_keys, VALID_CONTENT['rsa'])
- user_keys = self.tmp_path('user_keys')
- util.write_file(user_keys, VALID_CONTENT['dsa'])
+class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
- sshd_config = self.tmp_path('sshd_config')
+ def create_fake_users(self, names, mock_permissions,
+ m_get_group, m_get_owner, m_get_permissions,
+ m_getpwnam, users):
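+ # Register fake passwd entries for root and each user, and route the
+ # permission/owner/group/getpwnam lookups through the fake tables.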
+ homes = []
+
+ root = '/tmp/root'
+ fpw = FakePwEnt(pw_name="root", pw_dir=root)
+ users["root"] = fpw
+
+ for name in names:
+ home = '/tmp/home/' + name
+ fpw = FakePwEnt(pw_name=name, pw_dir=home)
+ users[name] = fpw
+ homes.append(home)
+
+ m_get_permissions.side_effect = partial(
+ mock_get_permissions, mock_permissions)
+ m_get_owner.side_effect = partial(mock_get_owner, mock_permissions)
+ m_get_group.side_effect = partial(mock_get_group, mock_permissions)
+ m_getpwnam.side_effect = partial(mock_getpwnam, users)
+ return homes
+
+ def create_user_authorized_file(self, home, filename, content_key, keys):
+ user_ssh_folder = "%s/.ssh" % home
+        # /tmp/home/<user>/.ssh/<filename> = content_key
+ authorized_keys = self.tmp_path(filename, dir=user_ssh_folder)
+ util.write_file(authorized_keys, VALID_CONTENT[content_key])
+ keys[authorized_keys] = content_key
+ return authorized_keys
+
+ def create_global_authorized_file(self, filename, content_key, keys):
+ authorized_keys = self.tmp_path(filename, dir='/tmp')
+ util.write_file(authorized_keys, VALID_CONTENT[content_key])
+ keys[authorized_keys] = content_key
+ return authorized_keys
+
+ def create_sshd_config(self, authorized_keys_files):
+ sshd_config = self.tmp_path('sshd_config', dir="/tmp")
util.write_file(
sshd_config,
- "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)
+ "AuthorizedKeysFile " + authorized_keys_files
)
+ return sshd_config
+ def execute_and_check(self, user, sshd_config, solution, keys,
+ delete_keys=True):
(auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config)
+ user, sshd_config)
content = ssh_util.update_authorized_keys(auth_key_entries, [])
- self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn)
- self.assertTrue(VALID_CONTENT['rsa'] in content)
- self.assertTrue(VALID_CONTENT['dsa'] in content)
+ self.assertEqual(auth_key_fn, solution)
+ for path, key in keys.items():
+ if path == solution:
+                self.assertIn(VALID_CONTENT[key], content)
+ else:
+                self.assertNotIn(VALID_CONTENT[key], content)
+
+ if delete_keys and os.path.isdir("/tmp/home/"):
+ util.delete_dir_contents("/tmp/home/")
@patch("cloudinit.ssh_util.pwd.getpwnam")
- def test_multiple_authorizedkeys_file_order2(self, m_getpwnam):
- fpw = FakePwEnt(pw_name='suzie', pw_dir='/home/suzie')
- m_getpwnam.return_value = fpw
- authorized_keys = self.tmp_path('authorized_keys')
- util.write_file(authorized_keys, VALID_CONTENT['rsa'])
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_two_local_files(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = 'bobby'
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby], mock_permissions, m_get_group, m_get_owner,
+ m_get_permissions, m_getpwnam, users
+ )
+ home = homes[0]
- user_keys = self.tmp_path('user_keys')
- util.write_file(user_keys, VALID_CONTENT['dsa'])
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home, 'authorized_keys', 'rsa', keys
+ )
- sshd_config = self.tmp_path('sshd_config')
- util.write_file(
- sshd_config,
- "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)
+ # /tmp/home/bobby/.ssh/user_keys = dsa
+ user_keys = self.create_user_authorized_file(
+ home, 'user_keys', 'dsa', keys
)
- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config
+ # /tmp/sshd_config
+ options = "%s %s" % (authorized_keys, user_keys)
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(user_bobby, sshd_config, authorized_keys, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_two_local_files_inverted(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = 'bobby'
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby], mock_permissions, m_get_group, m_get_owner,
+ m_get_permissions, m_getpwnam, users
+ )
+ home = homes[0]
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home, 'authorized_keys', 'rsa', keys
+ )
+
+ # /tmp/home/bobby/.ssh/user_keys = dsa
+ user_keys = self.create_user_authorized_file(
+ home, 'user_keys', 'dsa', keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s %s" % (user_keys, authorized_keys)
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(user_bobby, sshd_config, user_keys, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_local_global_files(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = 'bobby'
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby], mock_permissions, m_get_group, m_get_owner,
+ m_get_permissions, m_getpwnam, users
+ )
+ home = homes[0]
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home, 'authorized_keys', 'rsa', keys
+ )
+
+ # /tmp/home/bobby/.ssh/user_keys = dsa
+ user_keys = self.create_user_authorized_file(
+ home, 'user_keys', 'dsa', keys
+ )
+
+ authorized_keys_global = self.create_global_authorized_file(
+ 'etc/ssh/authorized_keys', 'ecdsa', keys
+ )
+
+ options = "%s %s %s" % (authorized_keys_global, user_keys,
+ authorized_keys)
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(user_bobby, sshd_config, user_keys, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_local_global_files_inverted(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = 'bobby'
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600),
+ '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby], mock_permissions, m_get_group, m_get_owner,
+ m_get_permissions, m_getpwnam, users
+ )
+ home = homes[0]
+
+    # /tmp/home/bobby/.ssh/authorized_keys2 = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home, 'authorized_keys2', 'rsa', keys
+ )
+
+    # /tmp/home/bobby/.ssh/user_keys3 = dsa
+ user_keys = self.create_user_authorized_file(
+ home, 'user_keys3', 'dsa', keys
+ )
+
+ authorized_keys_global = self.create_global_authorized_file(
+ 'etc/ssh/authorized_keys', 'ecdsa', keys
+ )
+
+ options = "%s %s %s" % (authorized_keys_global, authorized_keys,
+ user_keys)
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(user_bobby, sshd_config, authorized_keys, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_global_file(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = 'bobby'
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby], mock_permissions, m_get_group, m_get_owner,
+ m_get_permissions, m_getpwnam, users
+ )
+ home = homes[0]
+
+ # /tmp/etc/ssh/authorized_keys = rsa
+ authorized_keys_global = self.create_global_authorized_file(
+ 'etc/ssh/authorized_keys', 'rsa', keys
+ )
+
+ options = "%s" % authorized_keys_global
+ sshd_config = self.create_sshd_config(options)
+
+ default = "%s/.ssh/authorized_keys" % home
+ self.execute_and_check(user_bobby, sshd_config, default, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_local_file_standard(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600),
+ }
+
+ user_bobby = 'bobby'
+ user_suzie = 'suzie'
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, 'authorized_keys', 'rsa', keys
+ )
+
+    # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys
+ )
+
+ options = ".ssh/authorized_keys"
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_local_file_custom(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600),
+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh/authorized_keys2': ('suzie', 'suzie', 0o600),
+ }
+
+ user_bobby = 'bobby'
+ user_suzie = 'suzie'
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+
+ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, 'authorized_keys2', 'rsa', keys
+ )
+
+    # /tmp/home/suzie/.ssh/authorized_keys2 = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, 'authorized_keys2', 'ssh-xmss@openssh.com', keys
)
- content = ssh_util.update_authorized_keys(auth_key_entries, [])
- self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn)
- self.assertTrue(VALID_CONTENT['rsa'] in content)
- self.assertTrue(VALID_CONTENT['dsa'] in content)
+ options = ".ssh/authorized_keys2"
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_local_global_files(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600),
+ '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600),
+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh/authorized_keys2': ('suzie', 'suzie', 0o600),
+ '/tmp/home/suzie/.ssh/user_keys3': ('suzie', 'suzie', 0o600),
+ }
+
+ user_bobby = 'bobby'
+ user_suzie = 'suzie'
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+
+ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa
+ self.create_user_authorized_file(
+ home_bobby, 'authorized_keys2', 'rsa', keys
+ )
+ # /tmp/home/bobby/.ssh/user_keys3 = dsa
+ user_keys = self.create_user_authorized_file(
+ home_bobby, 'user_keys3', 'dsa', keys
+ )
+
+    # /tmp/home/suzie/.ssh/authorized_keys2 = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, 'authorized_keys2', 'ssh-xmss@openssh.com', keys
+ )
+
+    # /tmp/etc/ssh/authorized_keys2 = ecdsa
+ authorized_keys_global = self.create_global_authorized_file(
+ 'etc/ssh/authorized_keys2', 'ecdsa', keys
+ )
+
+ options = "%s %s %%h/.ssh/authorized_keys2" % \
+ (authorized_keys_global, user_keys)
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, user_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_local_global_files_badguy(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam,
+ m_get_user_groups
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600),
+ '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600),
+ '/tmp/home/badguy': ('root', 'root', 0o755),
+ '/tmp/home/badguy/home': ('root', 'root', 0o755),
+ '/tmp/home/badguy/home/bobby': ('root', 'root', 0o655),
+ }
+
+ user_bobby = 'bobby'
+ user_badguy = 'badguy'
+ home_bobby, *_ = self.create_fake_users(
+ [user_bobby, user_badguy], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
+ )
+ m_get_user_groups.side_effect = mock_get_user_groups
+
+ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, 'authorized_keys2', 'rsa', keys
+ )
+ # /tmp/home/bobby/.ssh/user_keys3 = dsa
+ user_keys = self.create_user_authorized_file(
+ home_bobby, 'user_keys3', 'dsa', keys
+ )
+
+ # /tmp/home/badguy/home/bobby = ""
+ authorized_keys2 = self.tmp_path('home/bobby', dir="/tmp/home/badguy")
+ util.write_file(authorized_keys2, '')
+
+    # /tmp/etc/ssh/authorized_keys2 = ecdsa
+ authorized_keys_global = self.create_global_authorized_file(
+ 'etc/ssh/authorized_keys2', 'ecdsa', keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s %%h/.ssh/authorized_keys2 %s %s" % \
+ (authorized_keys2, authorized_keys_global, user_keys)
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(
+ user_badguy, sshd_config, authorized_keys2, keys
+ )
+
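The badguy scenarios encode a StrictModes-style trust rule: a configured key
file only counts when the unprivileged user could not have tampered with it.
The walk below illustrates the general shape of such a check; it is a sketch
of the concept, not ssh_util's actual rule, whose exact outcomes are pinned
down by these tests:

    import os
    import stat

    def path_is_trusted(path, user_uid):
        # every component up to / must be owned by root or the user and
        # must not be group- or world-writable
        p = os.path.abspath(path)
        while True:
            st = os.stat(p)
            if st.st_uid not in (0, user_uid):
                return False
            if st.st_mode & (stat.S_IWGRP | stat.S_IWOTH):
                return False
            if p == '/':
                return True
            p = os.path.dirname(p)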
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+    def test_two_users_inaccessible_file(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam,
+ m_get_user_groups
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+
+ '/tmp/etc': ('root', 'root', 0o755),
+ '/tmp/etc/ssh': ('root', 'root', 0o755),
+ '/tmp/etc/ssh/userkeys': ('root', 'root', 0o700),
+ '/tmp/etc/ssh/userkeys/bobby': ('bobby', 'bobby', 0o600),
+ '/tmp/etc/ssh/userkeys/badguy': ('badguy', 'badguy', 0o600),
+
+ '/tmp/home/badguy': ('badguy', 'badguy', 0o700),
+ '/tmp/home/badguy/.ssh': ('badguy', 'badguy', 0o700),
+ '/tmp/home/badguy/.ssh/authorized_keys':
+ ('badguy', 'badguy', 0o600),
+ }
+
+ user_bobby = 'bobby'
+ user_badguy = 'badguy'
+ homes = self.create_fake_users(
+ [user_bobby, user_badguy], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
+ )
+ m_get_user_groups.side_effect = mock_get_user_groups
+ home_bobby = homes[0]
+ home_badguy = homes[1]
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, 'authorized_keys', 'rsa', keys
+ )
+ # /tmp/etc/ssh/userkeys/bobby = dsa
+    # written anyway; the root-owned 0o700 userkeys dir makes it
+    # unreachable for bobby
+ self.create_global_authorized_file(
+ 'etc/ssh/userkeys/bobby', 'dsa', keys
+ )
+
+ # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_badguy, 'authorized_keys', 'ssh-xmss@openssh.com', keys
+ )
+
+ # /tmp/etc/ssh/userkeys/badguy = ecdsa
+ self.create_global_authorized_file(
+ 'etc/ssh/userkeys/badguy', 'ecdsa', keys
+ )
+
+ # /tmp/sshd_config
+ options = "/tmp/etc/ssh/userkeys/%u .ssh/authorized_keys"
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(
+ user_badguy, sshd_config, authorized_keys2, keys
+ )
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_accessible_file(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam,
+ m_get_user_groups
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+
+ '/tmp/etc': ('root', 'root', 0o755),
+ '/tmp/etc/ssh': ('root', 'root', 0o755),
+ '/tmp/etc/ssh/userkeys': ('root', 'root', 0o755),
+ '/tmp/etc/ssh/userkeys/bobby': ('bobby', 'bobby', 0o600),
+ '/tmp/etc/ssh/userkeys/badguy': ('badguy', 'badguy', 0o600),
+
+ '/tmp/home/badguy': ('badguy', 'badguy', 0o700),
+ '/tmp/home/badguy/.ssh': ('badguy', 'badguy', 0o700),
+ '/tmp/home/badguy/.ssh/authorized_keys':
+ ('badguy', 'badguy', 0o600),
+ }
+
+ user_bobby = 'bobby'
+ user_badguy = 'badguy'
+ homes = self.create_fake_users(
+ [user_bobby, user_badguy], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
+ )
+ m_get_user_groups.side_effect = mock_get_user_groups
+ home_bobby = homes[0]
+ home_badguy = homes[1]
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ self.create_user_authorized_file(
+ home_bobby, 'authorized_keys', 'rsa', keys
+ )
+ # /tmp/etc/ssh/userkeys/bobby = dsa
+    # userkeys is 0o755 here, so this per-user file is reachable and wins
+ authorized_keys = self.create_global_authorized_file(
+ 'etc/ssh/userkeys/bobby', 'dsa', keys
+ )
+
+ # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com
+ self.create_user_authorized_file(
+ home_badguy, 'authorized_keys', 'ssh-xmss@openssh.com', keys
+ )
+
+ # /tmp/etc/ssh/userkeys/badguy = ecdsa
+ authorized_keys2 = self.create_global_authorized_file(
+ 'etc/ssh/userkeys/badguy', 'ecdsa', keys
+ )
+
+ # /tmp/sshd_config
+ options = "/tmp/etc/ssh/userkeys/%u .ssh/authorized_keys"
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(
+ user_badguy, sshd_config, authorized_keys2, keys
+ )
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_hardcoded_single_user_file(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam,
+ m_get_user_groups
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+
+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600),
+ }
+
+ user_bobby = 'bobby'
+ user_suzie = 'suzie'
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+ m_get_user_groups.side_effect = mock_get_user_groups
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, 'authorized_keys', 'rsa', keys
+ )
+
+ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com
+ self.create_user_authorized_file(
+ home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s" % (authorized_keys)
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ default = "%s/.ssh/authorized_keys" % home_suzie
+ self.execute_and_check(user_suzie, sshd_config, default, keys)
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_hardcoded_single_user_file_inverted(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam,
+ m_get_user_groups
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+
+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600),
+ }
+
+ user_bobby = 'bobby'
+ user_suzie = 'suzie'
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+ m_get_user_groups.side_effect = mock_get_user_groups
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ self.create_user_authorized_file(
+ home_bobby, 'authorized_keys', 'rsa', keys
+ )
+
+ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s" % (authorized_keys2)
+ sshd_config = self.create_sshd_config(options)
+
+ default = "%s/.ssh/authorized_keys" % home_bobby
+ self.execute_and_check(
+ user_bobby, sshd_config, default, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_hardcoded_user_files(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam,
+ m_get_user_groups
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+
+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600),
+ }
+
+ user_bobby = 'bobby'
+ user_suzie = 'suzie'
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+ m_get_user_groups.side_effect = mock_get_user_groups
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, 'authorized_keys', 'rsa', keys
+ )
+
+ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys
+ )
+
+ # /tmp/etc/ssh/authorized_keys = ecdsa
+ authorized_keys_global = self.create_global_authorized_file(
+ 'etc/ssh/authorized_keys', 'ecdsa', keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s %s %s" % \
+ (authorized_keys_global, authorized_keys, authorized_keys2)
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index e5292001..2290cab7 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -999,4 +999,22 @@ class TestFindDevs:
devlist = util.find_devs_with_netbsd(criteria=criteria)
assert devlist == expected_devlist
+ @pytest.mark.parametrize(
+ 'criteria,expected_devlist', (
+ (None, ['/dev/vbd0', '/dev/cd0', '/dev/acd0']),
+ ('TYPE=iso9660', ['/dev/cd0', '/dev/acd0']),
+ ('TYPE=vfat', ['/dev/vbd0']),
+ ('LABEL_FATBOOT=A_LABEL', # lp: #1841466
+ ['/dev/vbd0', '/dev/cd0', '/dev/acd0']),
+ )
+ )
+ @mock.patch("cloudinit.subp.subp")
+ def test_find_devs_with_dragonflybsd(self, m_subp, criteria,
+ expected_devlist):
+ m_subp.return_value = (
+ 'md2 md1 cd0 vbd0 acd0 vn3 vn2 vn1 vn0 md0', ''
+ )
+ devlist = util.find_devs_with_dragonflybsd(criteria=criteria)
+ assert devlist == expected_devlist
+
# vi: ts=4 expandtab
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index 48995057..4fa108aa 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -1,5 +1,6 @@
ader1990
ajmyyra
+akutz
AlexBaranowski
Aman306
andrewbogott
@@ -12,6 +13,7 @@ BirknerAlex
bmhughes
candlerb
cawamata
+ciprianbadescu
dankenigsberg
ddymko
dermotbradley
@@ -19,8 +21,12 @@ dhensby
eandersson
eb3095
emmanuelthome
+esposem
+GabrielNagy
giggsoff
hamalq
+impl
+irishgordo
izzyleung
johnsonshi
jordimassaguerpla
@@ -30,20 +36,26 @@ klausenbusk
landon912
lucasmoura
lungj
+mal
+mamercad
manuelisimo
marlluslustosa
matthewruffell
mitechie
+nazunalika
nicolasbock
nishigori
olivierlemasle
omBratteng
onitake
qubidt
+renanrodrigo
riedel
+sarahwzadara
slyon
smoser
sshedi
+stappersg
TheRealFalcon
taoyama
timothegenzmer
diff --git a/tools/build-on-netbsd b/tools/build-on-netbsd
index d2a7067d..32837058 100755
--- a/tools/build-on-netbsd
+++ b/tools/build-on-netbsd
@@ -2,17 +2,24 @@
fail() { echo "FAILED:" "$@" 1>&2; exit 1; }
+PYTHON="${PYTHON:-python3}"
+if ! command -v "${PYTHON}" >/dev/null 2>&1; then
+    fail "Please install ${PYTHON} first."
+fi
+py_prefix=$(${PYTHON} -c 'import sys; print("py%d%d" % (sys.version_info.major, sys.version_info.minor))')
+
# Check dependencies:
depschecked=/tmp/c-i.dependencieschecked
pkgs="
bash
dmidecode
- py37-configobj
- py37-jinja2
- py37-oauthlib
- py37-requests
- py37-setuptools
- py37-yaml
+ ${py_prefix}-configobj
+ ${py_prefix}-jinja2
+ ${py_prefix}-oauthlib
+ ${py_prefix}-requests
+ ${py_prefix}-setuptools
+ ${py_prefix}-yaml
sudo
"
[ -f "$depschecked" ] || pkg_add ${pkgs} || fail "install packages"
@@ -20,8 +27,8 @@ pkgs="
touch $depschecked
# Build the code and install in /usr/pkg/:
-python3.7 setup.py build
-python3.7 setup.py install -O1 --distro netbsd --skip-build --init-system sysvinit_netbsd
+${PYTHON} setup.py build
+${PYTHON} setup.py install -O1 --distro netbsd --skip-build --init-system sysvinit_netbsd
mv -v /usr/local/etc/rc.d/cloud* /etc/rc.d
# Enable cloud-init in /etc/rc.conf:
diff --git a/tools/ds-identify b/tools/ds-identify
index 73e27c71..f509f566 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -125,7 +125,7 @@ DI_DSNAME=""
# be searched if there is no setting found in config.
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \
CloudSigma CloudStack DigitalOcean Vultr AliYun Ec2 GCE OpenNebula OpenStack \
-OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud"
+OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud VMware"
DI_DSLIST=""
DI_MODE=""
DI_ON_FOUND=""
@@ -141,6 +141,7 @@ error() {
debug 0 "$@"
stderr "$@"
}
+
warn() {
set -- "WARN:" "$@"
debug 0 "$@"
@@ -344,7 +345,6 @@ geom_label_status_as() {
return $ret
}
-
read_fs_info_freebsd() {
local oifs="$IFS" line="" delim=","
local ret=0 labels="" dev="" label="" ftype="" isodevs=""
@@ -404,7 +404,6 @@ cached() {
[ -n "$1" ] && _RET="$1" && return || return 1
}
-
detect_virt() {
local virt="${UNAVAILABLE}" r="" out=""
if [ -d /run/systemd ]; then
@@ -450,7 +449,7 @@ detect_virt() {
read_virt() {
cached "$DI_VIRT" && return 0
detect_virt
- DI_VIRT=${_RET}
+ DI_VIRT="${_RET}"
}
is_container() {
@@ -1364,6 +1363,84 @@ dscheck_Vultr() {
return $DS_NOT_FOUND
}
+vmware_has_envvar_vmx_guestinfo() {
+ [ -n "${VMX_GUESTINFO:-}" ]
+}
+
+vmware_has_envvar_vmx_guestinfo_metadata() {
+ [ -n "${VMX_GUESTINFO_METADATA:-}" ]
+}
+
+vmware_has_envvar_vmx_guestinfo_userdata() {
+ [ -n "${VMX_GUESTINFO_USERDATA:-}" ]
+}
+
+vmware_has_envvar_vmx_guestinfo_vendordata() {
+ [ -n "${VMX_GUESTINFO_VENDORDATA:-}" ]
+}
+
+vmware_has_rpctool() {
+ command -v vmware-rpctool >/dev/null 2>&1
+}
+
+vmware_rpctool_guestinfo() {
+ vmware-rpctool "info-get guestinfo.${1}" 2>/dev/null | grep "[[:alnum:]]"
+}
+
+vmware_rpctool_guestinfo_metadata() {
+ vmware_rpctool_guestinfo "metadata"
+}
+
+vmware_rpctool_guestinfo_userdata() {
+ vmware_rpctool_guestinfo "userdata"
+}
+
+vmware_rpctool_guestinfo_vendordata() {
+ vmware_rpctool_guestinfo "vendordata"
+}
+
+dscheck_VMware() {
+ # Checks to see if there is valid data for the VMware datasource.
+ # The data transports are checked in the following order:
+ #
+ # * envvars
+ # * guestinfo
+ #
+ # Please note when updating this function with support for new data
+ # transports, the order should match the order in the _get_data
+ # function from the file DataSourceVMware.py.
+
+ # Check to see if running in a container and the VMware
+ # datasource is configured via environment variables.
+ if vmware_has_envvar_vmx_guestinfo; then
+ if vmware_has_envvar_vmx_guestinfo_metadata || \
+ vmware_has_envvar_vmx_guestinfo_userdata || \
+ vmware_has_envvar_vmx_guestinfo_vendordata; then
+ return "${DS_FOUND}"
+ fi
+ fi
+
+ # Do not proceed unless the detected platform is VMware.
+ if [ ! "${DI_VIRT}" = "vmware" ]; then
+ return "${DS_NOT_FOUND}"
+ fi
+
+ # Do not proceed if the vmware-rpctool command is not present.
+ if ! vmware_has_rpctool; then
+ return "${DS_NOT_FOUND}"
+ fi
+
+ # Activate the VMware datasource only if any of the fields used
+ # by the datasource are present in the guestinfo table.
+ if { vmware_rpctool_guestinfo_metadata || \
+ vmware_rpctool_guestinfo_userdata || \
+ vmware_rpctool_guestinfo_vendordata; } >/dev/null 2>&1; then
+ return "${DS_FOUND}"
+ fi
+
+ return "${DS_NOT_FOUND}"
+}
+
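For reference, the same precedence in a Python sketch (hypothetical helper
names; the datasource's real logic lives in DataSourceVMware.py, and this
sketch omits the DI_VIRT platform check applied above):

    import os
    import shutil
    import subprocess

    def guestinfo(key):
        # empty output from vmware-rpctool means the key is unset
        out = subprocess.run(
            ['vmware-rpctool', 'info-get guestinfo.%s' % key],
            capture_output=True, text=True).stdout.strip()
        return out or None

    def vmware_ds_found():
        # envvar transport first, then guestinfo via vmware-rpctool
        if os.environ.get('VMX_GUESTINFO') and any(
                os.environ.get('VMX_GUESTINFO_' + k)
                for k in ('METADATA', 'USERDATA', 'VENDORDATA')):
            return True
        if not shutil.which('vmware-rpctool'):
            return False
        return any(guestinfo(k)
                   for k in ('metadata', 'userdata', 'vendordata'))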
collect_info() {
read_uname_info
read_virt
diff --git a/tools/hook-hotplug b/tools/hook-hotplug
new file mode 100755
index 00000000..ced268b3
--- /dev/null
+++ b/tools/hook-hotplug
@@ -0,0 +1,26 @@
+#!/bin/bash
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# This script checks if cloud-init has hotplug hooked and if
+# cloud-init has finished; if so invoke cloud-init hotplug-hook
+
+is_finished() {
+ [ -e /run/cloud-init/result.json ]
+}
+
+hotplug_enabled() {
+ [ "$(cloud-init devel hotplug-hook -s "${SUBSYSTEM}" query)" == "enabled" ]
+}
+
+if is_finished && hotplug_enabled; then
+ # open cloud-init's hotplug-hook fifo rw
+ exec 3<>/run/cloud-init/hook-hotplug-cmd
+ env_params=(
+ --subsystem="${SUBSYSTEM}"
+ handle
+ --devpath="${DEVPATH}"
+ --udevaction="${ACTION}"
+ )
+ # write params to cloud-init's hotplug-hook fifo
+ echo "${env_params[@]}" >&3
+fi
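On the other side of that FIFO, cloud-init's hotplug handler consumes the
parameter line written above; a hypothetical reader loop, for illustration
only (not the actual hotplug-hook implementation):

    # /run/cloud-init/hook-hotplug-cmd is the FIFO the script writes to
    with open('/run/cloud-init/hook-hotplug-cmd') as fifo:
        for line in fifo:
            args = line.split()
            # e.g. ['--subsystem=net', 'handle',
            #       '--devpath=/devices/...', '--udevaction=add'],
            # ready to hand to an argparse-style parser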
diff --git a/tools/read-dependencies b/tools/read-dependencies
index 6ad5f701..810154e4 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -23,6 +23,8 @@ DEFAULT_REQUIREMENTS = 'requirements.txt'
# Map the appropriate package dir needed for each distro choice
DISTRO_PKG_TYPE_MAP = {
'centos': 'redhat',
+ 'eurolinux': 'redhat',
+ 'rocky': 'redhat',
'redhat': 'redhat',
'debian': 'debian',
'ubuntu': 'debian',
@@ -39,6 +41,7 @@ MAYBE_RELIABLE_YUM_INSTALL = [
error ":: http proxy in use => forcing the use of fixed URLs in /etc/yum.repos.d/*.repo"
sed -i --regexp-extended '/^#baseurl=/s/#// ; /^(mirrorlist|metalink)=/s/^/#/' /etc/yum.repos.d/*.repo
sed -i 's/download\.fedoraproject\.org/dl.fedoraproject.org/g' /etc/yum.repos.d/*.repo
+ sed -i 's/download\.example/dl.fedoraproject.org/g' /etc/yum.repos.d/*.repo
}
configure_repos_for_proxy_use
n=0; max=10;
@@ -64,11 +67,15 @@ ZYPPER_INSTALL = [
'--auto-agree-with-licenses']
DRY_DISTRO_INSTALL_PKG_CMD = {
+ 'rocky': ['yum', 'install', '--assumeyes'],
'centos': ['yum', 'install', '--assumeyes'],
+ 'eurolinux': ['yum', 'install', '--assumeyes'],
'redhat': ['yum', 'install', '--assumeyes'],
}
DISTRO_INSTALL_PKG_CMD = {
+ 'rocky': MAYBE_RELIABLE_YUM_INSTALL,
+ 'eurolinux': MAYBE_RELIABLE_YUM_INSTALL,
'centos': MAYBE_RELIABLE_YUM_INSTALL,
'redhat': MAYBE_RELIABLE_YUM_INSTALL,
'debian': ['apt', 'install', '-y'],
@@ -81,6 +88,7 @@ DISTRO_INSTALL_PKG_CMD = {
# List of base system packages required to enable ci automation
CI_SYSTEM_BASE_PKGS = {
'common': ['make', 'sudo', 'tar'],
+ 'eurolinux': ['python3-tox'],
'redhat': ['python3-tox'],
'centos': ['python3-tox'],
'ubuntu': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild'],
@@ -273,10 +281,10 @@ def pkg_install(pkg_list, distro, test_distro=False, dry_run=False):
cmd = DRY_DISTRO_INSTALL_PKG_CMD[distro]
install_cmd.extend(cmd)
- if distro in ['centos', 'redhat']:
+ if distro in ['centos', 'redhat', 'rocky', 'eurolinux']:
        # CentOS, RedHat, Rocky, and EuroLinux need epel-release to access oauthlib and jsonschema
subprocess.check_call(install_cmd + ['epel-release'])
- if distro in ['suse', 'opensuse', 'redhat', 'centos']:
+ if distro in ['suse', 'opensuse', 'redhat', 'rocky', 'centos', 'eurolinux']:
pkg_list.append('rpm-build')
subprocess.check_call(install_cmd + pkg_list)
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
index f5990748..30f82521 100755
--- a/tools/render-cloudcfg
+++ b/tools/render-cloudcfg
@@ -5,8 +5,8 @@ import os
import sys
VARIANTS = ["almalinux", "alpine", "amazon", "arch", "centos", "debian",
- "fedora", "freebsd", "netbsd", "openbsd", "rhel", "suse", "ubuntu",
- "unknown"]
+ "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", "photon",
+ "rhel", "suse","rocky", "ubuntu", "unknown", "virtuozzo"]
if "avoid-pep8-E402-import-not-top-of-file":
diff --git a/tools/run-container b/tools/run-container
index 15948e77..e049dfdc 100755
--- a/tools/run-container
+++ b/tools/run-container
@@ -191,7 +191,7 @@ os_info() {
get_os_info() {
# run inside container, set OS_NAME, OS_VERSION
- # example OS_NAME are centos, debian, opensuse
+ # example OS_NAME are centos, debian, opensuse, rockylinux
[ -n "${OS_NAME:-}" -a -n "${OS_VERSION:-}" ] && return 0
if [ -f /etc/os-release ]; then
OS_NAME=$(sh -c '. /etc/os-release; echo $ID')
@@ -247,7 +247,7 @@ apt_install() {
install_packages() {
get_os_info || return
case "$OS_NAME" in
- centos) yum_install "$@";;
+ centos|rocky*) yum_install "$@";;
opensuse) zypper_install "$@";;
debian|ubuntu) apt_install "$@";;
*) error "Do not know how to install packages on ${OS_NAME}";
@@ -353,6 +353,7 @@ wait_for_boot() {
inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf"
inside "$name" sh -c "sed -i --regexp-extended '/^#baseurl=/s/#// ; /^(mirrorlist|metalink)=/s/^/#/' /etc/yum.repos.d/*.repo"
inside "$name" sh -c "sed -i 's/download\.fedoraproject\.org/dl.fedoraproject.org/g' /etc/yum.repos.d/*.repo"
+ inside "$name" sh -c "sed -i 's/download\.example/dl.fedoraproject.org/g' /etc/yum.repos.d/*.repo"
else
debug 1 "do not know how to configure proxy on $OS_NAME"
fi
@@ -485,7 +486,7 @@ main() {
local build_pkg="" build_srcpkg="" pkg_ext="" distflag=""
case "$OS_NAME" in
- centos) distflag="--distro=redhat";;
+ centos|rocky) distflag="--distro=redhat";;
opensuse) distflag="--distro=suse";;
esac
@@ -494,7 +495,7 @@ main() {
build_pkg="./packages/bddeb -d"
build_srcpkg="./packages/bddeb -S -d"
pkg_ext=".deb";;
- centos|opensuse)
+ centos|opensuse|rocky)
build_pkg="./packages/brpm $distflag"
build_srcpkg="./packages/brpm $distflag --srpm"
pkg_ext=".rpm";;
diff --git a/tox.ini b/tox.ini
index bf8cb78b..27c16ef3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -23,7 +23,7 @@ setenv =
basepython = python3
deps =
# requirements
- pylint==2.6.0
+ pylint==2.9.3
# test-requirements because unit tests are now present in cloudinit tree
-r{toxinidir}/test-requirements.txt
-r{toxinidir}/integration-requirements.txt
@@ -157,7 +157,15 @@ passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_* TRAVIS
deps =
-r{toxinidir}/integration-requirements.txt
setenv =
- PYTEST_ADDOPTS="-m ci"
+ PYTEST_ADDOPTS="-m ci and not adhoc"
+
+[testenv:integration-tests-jenkins]
+commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests}
+passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_*
+deps =
+ -r{toxinidir}/integration-requirements.txt
+setenv =
+ PYTEST_ADDOPTS="-m not adhoc"
[pytest]
# TODO: s/--strict/--strict-markers/ once xenial support is dropped
@@ -174,9 +182,10 @@ markers =
gce: test will only run on GCE platform
azure: test will only run on Azure platform
oci: test will only run on OCI platform
- openstack: test will only run on openstack
+ openstack: test will only run on openstack platform
lxd_config_dict: set the config_dict passed on LXD instance creation
lxd_container: test will only run in LXD container
+ lxd_setup: specify callable to be called between init and start
lxd_use_exec: `execute` will use `lxc exec` instead of SSH
lxd_vm: test will only run in LXD VM
not_xenial: test cannot run on the xenial release
@@ -189,3 +198,4 @@ markers =
sru_next: test is part of the next SRU verification
ubuntu: this test should run on Ubuntu
    unstable: skip this test because it is flaky
+ adhoc: only run on adhoc basis, not in any CI environment (travis or jenkins)
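With the marker registered, opting a test out of CI becomes a one-line
annotation; standard pytest usage, shown here with a made-up test name:

    import pytest

    @pytest.mark.adhoc
    def test_needs_manual_setup():
        # selected only by an explicit `-m adhoc` (or no marker filter)
        assert True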
diff --git a/udev/10-cloud-init-hook-hotplug.rules b/udev/10-cloud-init-hook-hotplug.rules
new file mode 100644
index 00000000..2e382679
--- /dev/null
+++ b/udev/10-cloud-init-hook-hotplug.rules
@@ -0,0 +1,6 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+# Handle device adds only
+ACTION!="add|remove", GOTO="cloudinit_end"
+LABEL="cloudinit_hook"
+SUBSYSTEM=="net|block", RUN+="/usr/lib/cloud-init/hook-hotplug"
+LABEL="cloudinit_end"