From a4ffbb061f1baa79ec292fea4c1aff91b1f55f3f Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 15 Nov 2022 13:53:37 -0700 Subject: changelog: capture 22.3.1-4 releases Manually capture in changelog published releases and security fixes. --- ChangeLog | 91 +++++++++++++++++++++++++++++++++++++++++---------------------- 1 file changed, 60 insertions(+), 31 deletions(-) diff --git a/ChangeLog b/ChangeLog index 16d58d3a..11a8538e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -117,6 +117,35 @@ - Identify Huawei Cloud as OpenStack (#1689) [huang xinjie] - doc: add reporting suggestion to FAQ (SC-1236) (#1698) +22.3.4 + - Fix Oracle DS primary interface when using IMDS (LP: #1989686) + +22.3.3 + - Fix Oracle DS not setting subnet when using IMDS (LP: #1989686) + +22.3.2 + - azure: define new attribute for pre-22.3 pickles (#1725) + - sources/azure: ensure instance id is always correct (#1727) + +22.3.1 + - Fix v2 interface matching when no MAC (LP: #1986551) + - test: reduce number of network dependencies in flaky test (#1702) + - docs: publish cc_ubuntu_autoinstall docs to rtd (#1696) + - net: Fix EphemeraIPNetwork (#1697) [Alberto Contreras] + - test: make ansible test work across older versions (#1691) + - Networkd multi-address support/fix (#1685) [Teodor Garzdin] + - make: drop broken targets (#1688) + - net: Passthough v2 netconfigs in netplan systems (#1650) + [Alberto Contreras] (LP: #1978543) + - NM ipv6 connection does not work on Azure and Openstack (#1616) + [Emanuele Giuseppe Esposito] + - Fix check_format_tip (#1679) [Alberto Contreras] + - DataSourceVMware: fix var use before init (#1674) + [Andrew Kutz] (LP: #1987005) + - rpm/copr: ensure RPM represents new clean.d dir artifacts (#1680) + - test: avoid centos leaked check of /etc/yum.repos.d/epel-testing.repo + (#1676) + 22.3 - sources: obj.pkl cache should be written anyime get_data is run (#1669) - schema: drop release number from version file (#1664) @@ -3134,7 +3163,7 @@ - support network rendering to 
sysconfig (for centos and RHEL) - write_files: if no permissions are given, just use default without warn. - user_data: fix error when user-data is not utf-8 decodable (LP: #1532072) - - fix mcollective module with python3 (LP: #1597699) [Sergii Golovatiuk] + - fix mcollective module with python3 (LP: #1597699) [Sergii Golovatiuk] 0.7.6: - open 0.7.6 @@ -3259,7 +3288,7 @@ filesystems. Useful if attached disks are not formatted (LP: #1218506) - Fix usage of libselinux-python when selinux is disabled. [Garrett Holmstrom] - multi_log: only write to /dev/console if it exists [Garrett Holmstrom] - - config/cloud.cfg: add 'sudo' to list groups for the default user + - config/cloud.cfg: add 'sudo' to list groups for the default user (LP: #1228228) - documentation fix for use of 'mkpasswd' [Eric Nordlund] - respect /etc/growroot-disabled file (LP: #1234331) @@ -3325,7 +3354,7 @@ can be more useful. 0.7.1: - - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 + - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) - landscape: install landscape-client package if not installed. only take action if cloud-config is present (LP: #1066115) @@ -3374,14 +3403,14 @@ 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. - - utilize the 'exception_cb' above to modify the oauth timestamp in + - utilize the 'exception_cb' above to modify the oauth timestamp in DataSourceMAAS requests if a 401 or 403 is received. 
(LP: #978127) - catch signals and exit rather than stack tracing - if logging fails, enable a fallback logger by patching the logging module - do not 'start networking' in cloud-init-nonet, but add cloud-init-container job that runs only if in container and emits net-device-added (LP: #1031065) - - search only top level dns for 'instance-data' in + - search only top level dns for 'instance-data' in DataSourceEc2 (LP: #1040200) - add support for config-drive-v2 (LP:#1037567) - support creating users, including the default user. @@ -3419,10 +3448,10 @@ reduces reuse and limits future functionality, and makes testing harder) - removal of global config that defined paths, shared config, now this is via objects making unit testing testing and global side-effects a non issue - - creation of a 'helpers.py' - - this contains an abstraction for the 'lock' like objects that the various - module/handler running stages use to avoid re-running a given - module/handler for a given frequency. this makes it separated from + - creation of a 'helpers.py' + - this contains an abstraction for the 'lock' like objects that the various + module/handler running stages use to avoid re-running a given + module/handler for a given frequency. 
this makes it separated from the actual usage of that object (thus helpful for testing and clear lines usage and how the actual job is accomplished) - a common 'runner' class is the main entrypoint using these locks to @@ -3431,11 +3460,11 @@ - add in a 'paths' object that provides access to the previously global and/or config based paths (thus providing a single entrypoint object/type that provides path information) - - this also adds in the ability to change the path when constructing - that path 'object' and adding in additional config that can be used to + - this also adds in the ability to change the path when constructing + that path 'object' and adding in additional config that can be used to alter the root paths of 'joins' (useful for testing or possibly useful in chroots?) - - config options now avaiable that can alter the 'write_root' and the + - config options now avaiable that can alter the 'write_root' and the 'read_root' when backing code uses the paths join() function - add a config parser subclass that will automatically add unknown sections and return default values (instead of throwing exceptions for these cases) @@ -3459,7 +3488,7 @@ the passed in logger (its still passed in) - ensure that all places where exception are caught and where applicable that the util logexc() is called, so that no exceptions that may occur - are dropped without first being logged (where it makes sense for this + are dropped without first being logged (where it makes sense for this to happen) - add a 'requires' file that lists cloud-init dependencies - applying it in package creation (bdeb and brpm) as well as using it @@ -3471,12 +3500,12 @@ subp() utility method, which now has an exception type that will provide detailed information on python 2.6 and 2.7 - forced all code loading, moving, chmod, writing files and other system - level actions to go through standard set of util functions, this greatly + level actions to go through standard set of util functions, this 
greatly helps in debugging and determining exactly which system actions cloud-init is performing - adjust url fetching and url trying to go through a single function that reads urls in the new 'url helper' file, this helps in tracing, debugging - and knowing which urls are being called and/or posted to from with-in + and knowing which urls are being called and/or posted to from with-in cloud-init code - add in the sending of a 'User-Agent' header for all urls fetched that do not provide there own header mapping, derive this user-agent from @@ -3486,7 +3515,7 @@ and defined output that should be easier to parse than a custom format - add a set of distro specific classes, that handle distro specific actions that modules and or handler code can use as needed, this is organized into - a base abstract class with child classes that implement the shared + a base abstract class with child classes that implement the shared functionality. config determines exactly which subclass to load, so it can be easily extended as needed. - current functionality @@ -3498,16 +3527,16 @@ - interface up/down activating - implemented a debian + ubuntu subclass - implemented a redhat + fedora subclass - - adjust the root 'cloud.cfg' file to now have distrobution/path specific + - adjust the root 'cloud.cfg' file to now have distrobution/path specific configuration values in it. 
these special configs are merged as the normal config is, but the system level config is not passed into modules/handlers - modules/handlers must go through the path and distro object instead - - have the cloudstack datasource test the url before calling into boto to + - have the cloudstack datasource test the url before calling into boto to avoid the long wait for boto to finish retrying and finally fail when the gateway meta-data address is unavailable - add a simple mock ec2 meta-data python based http server that can serve a very simple set of ec2 meta-data back to callers - - useful for testing or for understanding what the ec2 meta-data + - useful for testing or for understanding what the ec2 meta-data service can provide in terms of data or functionality - for ssh key and authorized key file parsing add in classes and util functions that maintain the state of individual lines, allowing for a @@ -3541,7 +3570,7 @@ - use 'is None' instead of the frowned upon '== None' which picks up a large set of 'true' cases than is typically desired (ie for objects that have there own equality) - - use context managers on locks, tempdir, chdir, file, selinux, umask, + - use context managers on locks, tempdir, chdir, file, selinux, umask, unmounting commands so that these actions do not have to be closed and/or cleaned up manually in finally blocks, which is typically not done and will eventually be a bug in the future @@ -3565,7 +3594,7 @@ - place the rfc 8222 time formatting and uptime repeated pieces of code in the util module as a set of function with the name 'time_rfc2822'/'uptime' - separate the pylint+pep8 calling from one tool into two indivudal tools so - that they can be called independently, add make file sections that can be + that they can be called independently, add make file sections that can be used to call these independently - remove the support for the old style config that was previously located in '/etc/ec2-init/ec2-config.cfg', no longer supported! 
@@ -3576,12 +3605,12 @@ - use the new defaulting config parser (that will not raise errors on sections that do not exist or return errors when values are fetched that do not exist) in the 'puppet' module - - for config 'modules' add in the ability for the module to provide a list of + - for config 'modules' add in the ability for the module to provide a list of distro names which it is known to work with, if when ran and the distro being used name does not match one of those in this list, a warning will be written out saying that this module may not work correctly on this distrobution - - for all dynamically imported modules ensure that they are fixed up before + - for all dynamically imported modules ensure that they are fixed up before they are used by ensuring that they have certain attributes, if they do not have those attributes they will be set to a sensible set of defaults instead - adjust all 'config' modules and handlers to use the adjusted util functions @@ -3609,7 +3638,7 @@ - support setting of Acquire::HTTP::Proxy via 'apt_proxy' - DataSourceEc2: more resilliant to slow metadata service - config change: 'retries' dropped, 'max_wait' added, timeout increased - - close stdin in all cloud-init programs that are launched at boot + - close stdin in all cloud-init programs that are launched at boot (LP: #903993) - revert management of /etc/hosts to 0.6.1 style (LP: #890501, LP: #871966) - write full ssh keys to console for easy machine consumption (LP: #893400) @@ -3621,7 +3650,7 @@ in the payload parameter. (LP: #874342) - add test case framework [Mike Milner] (LP: #890851) - fix pylint warnings [Juerg Haefliger] (LP: #914739) - - add support for adding and deleting CA Certificates [Mike Milner] + - add support for adding and deleting CA Certificates [Mike Milner] (LP: #915232) - in ci-info lines, use '.' 
to indicate empty field for easier machine reading - support empty lines in "#include" files (LP: #923043) @@ -3635,17 +3664,17 @@ - DataSourceMaaS: add data source for Ubuntu Machines as a Service (MaaS) (LP: #942061) - DataSourceCloudStack: add support for CloudStack datasource [Cosmin Luta] - - add option 'apt_pipelining' to address issue with S3 mirrors + - add option 'apt_pipelining' to address issue with S3 mirrors (LP: #948461) [Ben Howard] - warn on non-multipart, non-handled user-data [Martin Packman] - run resizefs in the background in order to not block boot (LP: #961226) - Fix bug in Chef support where validation_key was present in config, but 'validation_cert' was not (LP: #960547) - - Provide user friendly message when an invalid locale is set + - Provide user friendly message when an invalid locale is set [Ben Howard] (LP: #859814) - Support reading cloud-config from kernel command line parameter and populating local file with it, which can then provide data for DataSources - - improve chef examples for working configurations on 11.10 and 12.04 + - improve chef examples for working configurations on 11.10 and 12.04 [Lorin Hochstein] (LP: #960564) 0.6.2: @@ -3694,7 +3723,7 @@ This was done by changing all users of util.subp to have None input unless specified - Add some debug info to the console when cloud-init runs. - This is useful if debugging, IP and route information is printed to the + This is useful if debugging, IP and route information is printed to the console. - change the mechanism for handling .ssh/authorized_keys, to update entries rather than appending. This ensures that the authorized_keys that are @@ -3756,7 +3785,7 @@ - moved upstart/cloud-run-user-script.conf to upstart/cloud-final.conf - cloud-final.conf now runs runs cloud-config modules similar to cloud-config and cloud-init. 
- - LP: #653271 + - LP: #653271 - added writing of "boot-finished" to /var/lib/cloud/instance/boot-finished this is the last thing done, indicating cloud-init is finished booting - writes message to console with timestamp and uptime @@ -3779,6 +3808,6 @@ - add support for reading Rightscale style user data (LP: #668400) and acting on it in cloud-config (cc_rightscale_userdata.py) - make the message on 'disable_root' more clear (LP: #672417) - - do not require public key if private is given in ssh cloud-config + - do not require public key if private is given in ssh cloud-config (LP: #648905) # vi: syntax=text textwidth=79 -- cgit v1.2.1 From afae2bd159567c574d120a046b83c083fcf8fe2a Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 16 Nov 2022 07:50:33 -0700 Subject: ChangeLog: whitespace cleanup (#1850) --- ChangeLog | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ChangeLog b/ChangeLog index 11a8538e..eb9a104b 100644 --- a/ChangeLog +++ b/ChangeLog @@ -137,14 +137,14 @@ - make: drop broken targets (#1688) - net: Passthough v2 netconfigs in netplan systems (#1650) [Alberto Contreras] (LP: #1978543) - - NM ipv6 connection does not work on Azure and Openstack (#1616) - [Emanuele Giuseppe Esposito] - - Fix check_format_tip (#1679) [Alberto Contreras] - - DataSourceVMware: fix var use before init (#1674) - [Andrew Kutz] (LP: #1987005) - - rpm/copr: ensure RPM represents new clean.d dir artifacts (#1680) - - test: avoid centos leaked check of /etc/yum.repos.d/epel-testing.repo - (#1676) + - NM ipv6 connection does not work on Azure and Openstack (#1616) + [Emanuele Giuseppe Esposito] + - Fix check_format_tip (#1679) [Alberto Contreras] + - DataSourceVMware: fix var use before init (#1674) + [Andrew Kutz] (LP: #1987005) + - rpm/copr: ensure RPM represents new clean.d dir artifacts (#1680) + - test: avoid centos leaked check of /etc/yum.repos.d/epel-testing.repo + (#1676) 22.3 - sources: obj.pkl cache should be written anyime get_data 
is run (#1669) -- cgit v1.2.1 From 42f701751bc6bd3a3ccbd3930cecdad97c490038 Mon Sep 17 00:00:00 2001 From: s-makin Date: Wed, 16 Nov 2022 15:47:21 +0000 Subject: doc: home page links added (#1852) - Moved URLs to the end of the file - Standardised text between RTD and README.md - Added missing links from README.md --- doc/rtd/index.rst | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst index 159113f4..94d7c882 100644 --- a/doc/rtd/index.rst +++ b/doc/rtd/index.rst @@ -4,11 +4,13 @@ cloud-init Documentation ######################## Cloud-init is the *industry standard* multi-distribution method for -cross-platform cloud instance initialization. +cross-platform cloud instance initialization. It is supported across all major +public cloud providers, provisioning systems for private cloud infrastructure, +and bare-metal installations. During boot, cloud-init identifies the cloud it is running on and initializes the system accordingly. Cloud instances will automatically be provisioned -during first boot with networking, storage, ssh keys, packages and various +during first boot with networking, storage, SSH keys, packages and various other system aspects already configured. Cloud-init provides the necessary glue between launching a cloud instance and @@ -23,18 +25,19 @@ Project and community Cloud-init is an open source project that warmly welcomes community projects, contributions, suggestions, fixes and constructive feedback. -* `Code of conduct `_ -* Ask questions in IRC on ``#cloud-init`` on Libera -* `Mailing list `_ -* `Contribute on Github `_ -* `Release schedule `_ +* Read our `Code of conduct`_ +* Ask questions in the ``#cloud-init`` `IRC channel on Libera`_ +* Join the `cloud-init mailing list`_ +* `Contribute on Github`_ +* `Release schedule`_ Having trouble? We would like to help! 
************************************** - Check out the :ref:`lxd_tutorial` if you're new to cloud-init - Try the :ref:`FAQ` for answers to some common questions -- Find a bug? `Report bugs on Launchpad `_ +- You can also search the cloud-init `mailing list archive`_ +- Find a bug? `Report bugs on Launchpad`_ .. toctree:: :hidden: @@ -99,3 +102,11 @@ Having trouble? We would like to help! topics/docs.rst topics/testing.rst topics/integration_tests.rst +.. LINKS +.. _Code of conduct: https://ubuntu.com/community/code-of-conduct +.. _IRC channel on Libera: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init +.. _cloud-init mailing list: https://launchpad.net/~cloud-init +.. _mailing list archive: https://lists.launchpad.net/cloud-init/ +.. _Contribute on Github: https://cloudinit.readthedocs.io/en/latest/topics/contributing.html +.. _Release schedule: https://discourse.ubuntu.com/t/cloud-init-release-schedule/32244 +.. _Report bugs on Launchpad: https://bugs.launchpad.net/cloud-init/+filebug -- cgit v1.2.1 From f5431e50a3b29db0ee044e7fa8a6a279d6cb14f7 Mon Sep 17 00:00:00 2001 From: PengpengSun <40026211+PengpengSun@users.noreply.github.com> Date: Thu, 17 Nov 2022 05:37:20 +0800 Subject: VMware: Move Guest Customization transport from OVF to VMware (#1573) This change is moving VMware Guest Customization data transport to VMware datasource, the goal is to have a single datasource for VMware which is named DatasourceVMware. Besides Guest Customization data transport(by local file), VMware datasource already has other data transports, like Guestinfo Keys and environment arguments. The detailed changes are: 1. Remove Guest Customization data transport from OVF datasource. 2. Refactor Guest Customization data transport code and add them to VMware datasource, For backward compatibility, Guest Customization data transport is put ahead of the other existing data transport since OVF is ahead of VMware in datasource searching list. 3. 
Add instance-id support in customization configuration file, so that instance-id can be given from vSphere side, if instance-id is not given, this datasource will read it from the file /sys/class/dmi/id/product_uuid no matter what transport is. 4. Move Guest Customization detection from dscheck_OVF() to dscheck_VMmare() in ds_identify script, Guest Customization detection is ahead of the other existing data transports since OVF is ahead of VMware in datasource searching list. 5. Modify unittests according to above changes. 6. Modify both OVF and VMware datasource documents according to above changes. 7. Guestinfo OVF data transport is left to OVF datasource since it uses OVF which is same with iso9660 transport. --- cloudinit/cmd/devel/net_convert.py | 10 +- cloudinit/sources/DataSourceOVF.py | 554 --------------- cloudinit/sources/DataSourceVMware.py | 196 +++++- cloudinit/sources/helpers/vmware/imc/config.py | 6 + .../sources/helpers/vmware/imc/guestcust_util.py | 470 ++++++++++++- doc/rtd/topics/datasources/ovf.rst | 32 - doc/rtd/topics/datasources/vmware.rst | 90 ++- tests/data/vmware/cust-dhcp-2nic-instance-id.cfg | 37 + tests/unittests/sources/test_ovf.py | 745 +-------------------- tests/unittests/sources/test_vmware.py | 709 +++++++++++++++++++- .../sources/vmware/test_vmware_config_file.py | 44 +- tests/unittests/test_ds_identify.py | 121 ++-- tools/ds-identify | 9 +- 13 files changed, 1561 insertions(+), 1462 deletions(-) create mode 100644 tests/data/vmware/cust-dhcp-2nic-instance-id.cfg diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index 269d72cd..eee49860 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -19,8 +19,8 @@ from cloudinit.net import ( sysconfig, ) from cloudinit.sources import DataSourceAzure as azure -from cloudinit.sources import DataSourceOVF as ovf from cloudinit.sources.helpers import openstack +from cloudinit.sources.helpers.vmware.imc import 
guestcust_util NAME = "net-convert" @@ -130,8 +130,12 @@ def handle_args(name, args): json.loads(net_data)["network"] ) elif args.kind == "vmware-imc": - config = ovf.Config(ovf.ConfigFile(args.network_data.name)) - pre_ns = ovf.get_network_config_from_conf(config, False) + config = guestcust_util.Config( + guestcust_util.ConfigFile(args.network_data.name) + ) + pre_ns = guestcust_util.get_network_data_from_vmware_cust_cfg( + config, False + ) distro_cls = distros.fetch(args.distro) distro = distro_cls(args.distro, {}, None) diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 05bf84c2..7baef3a5 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -11,49 +11,13 @@ import base64 import os import re -import time from xml.dom import minidom -from cloudinit import dmi from cloudinit import log as logging from cloudinit import safeyaml, sources, subp, util -from cloudinit.sources.helpers.vmware.imc.config import Config -from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( - PostCustomScript, - PreCustomScript, -) -from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile -from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator -from cloudinit.sources.helpers.vmware.imc.config_passwd import ( - PasswordConfigurator, -) -from cloudinit.sources.helpers.vmware.imc.guestcust_error import ( - GuestCustErrorEnum, -) -from cloudinit.sources.helpers.vmware.imc.guestcust_event import ( - GuestCustEventEnum as GuestCustEvent, -) -from cloudinit.sources.helpers.vmware.imc.guestcust_state import ( - GuestCustStateEnum, -) -from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( - enable_nics, - get_nics_to_enable, - get_tools_config, - set_customization_status, - set_gc_status, -) LOG = logging.getLogger(__name__) -CONFGROUPNAME_GUESTCUSTOMIZATION = "deployPkg" -GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS = "enable-custom-scripts" -VMWARE_IMC_DIR = 
"/var/run/vmware-imc" - - -class GuestCustScriptDisabled(Exception): - pass - class DataSourceOVF(sources.DataSource): @@ -66,11 +30,7 @@ class DataSourceOVF(sources.DataSource): self.environment = None self.cfg = {} self.supported_seed_starts = ("/", "file://") - self.vmware_customization_supported = True self._network_config = None - self._vmware_nics_to_enable = None - self._vmware_cust_conf = None - self._vmware_cust_found = False def __str__(self): root = sources.DataSource.__str__(self) @@ -81,8 +41,6 @@ class DataSourceOVF(sources.DataSource): md = {} ud = "" vd = "" - vmwareImcConfigFilePath = None - nicspath = None defaults = { "instance-id": "iid-dsovf", @@ -90,305 +48,12 @@ class DataSourceOVF(sources.DataSource): (seedfile, contents) = get_ovf_env(self.paths.seed_dir) - system_type = dmi.read_dmi_data("system-product-name") - if system_type is None: - LOG.debug("No system-product-name found") - if seedfile: # Found a seed dir seed = os.path.join(self.paths.seed_dir, seedfile) (md, ud, cfg) = read_ovf_environment(contents) self.environment = contents found.append(seed) - elif system_type and "vmware" in system_type.lower(): - LOG.debug("VMware Virtualization Platform found") - allow_vmware_cust = False - allow_raw_data = False - if not self.vmware_customization_supported: - LOG.debug( - "Skipping the check for VMware Customization support" - ) - else: - allow_vmware_cust = not util.get_cfg_option_bool( - self.sys_cfg, "disable_vmware_customization", True - ) - allow_raw_data = util.get_cfg_option_bool( - self.ds_cfg, "allow_raw_data", True - ) - - if not (allow_vmware_cust or allow_raw_data): - LOG.debug("Customization for VMware platform is disabled.") - else: - search_paths = ( - "/usr/lib/vmware-tools", - "/usr/lib64/vmware-tools", - "/usr/lib/open-vm-tools", - "/usr/lib64/open-vm-tools", - "/usr/lib/x86_64-linux-gnu/open-vm-tools", - "/usr/lib/aarch64-linux-gnu/open-vm-tools", - ) - - plugin = "libdeployPkgPlugin.so" - deployPkgPluginPath = None - 
for path in search_paths: - deployPkgPluginPath = search_file(path, plugin) - if deployPkgPluginPath: - LOG.debug( - "Found the customization plugin at %s", - deployPkgPluginPath, - ) - break - - if deployPkgPluginPath: - # When the VM is powered on, the "VMware Tools" daemon - # copies the customization specification file to - # /var/run/vmware-imc directory. cloud-init code needs - # to search for the file in that directory which indicates - # that required metadata and userdata files are now - # present. - max_wait = get_max_wait_from_cfg(self.ds_cfg) - vmwareImcConfigFilePath = util.log_time( - logfunc=LOG.debug, - msg="waiting for configuration file", - func=wait_for_imc_cfg_file, - args=("cust.cfg", max_wait), - ) - else: - LOG.debug("Did not find the customization plugin.") - - md_path = None - if vmwareImcConfigFilePath: - imcdirpath = os.path.dirname(vmwareImcConfigFilePath) - cf = ConfigFile(vmwareImcConfigFilePath) - self._vmware_cust_conf = Config(cf) - LOG.debug( - "Found VMware Customization Config File at %s", - vmwareImcConfigFilePath, - ) - try: - (md_path, ud_path, nicspath) = collect_imc_file_paths( - self._vmware_cust_conf - ) - except FileNotFoundError as e: - _raise_error_status( - "File(s) missing in directory", - e, - GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, - vmwareImcConfigFilePath, - self._vmware_cust_conf, - ) - # Don't handle the customization for below 2 cases: - # 1. meta data is found, allow_raw_data is False. - # 2. no meta data is found, allow_vmware_cust is False. - if md_path and not allow_raw_data: - LOG.debug("Customization using raw data is disabled.") - # reset vmwareImcConfigFilePath to None to avoid - # customization for VMware platform - vmwareImcConfigFilePath = None - if md_path is None and not allow_vmware_cust: - LOG.debug( - "Customization using VMware config is disabled." 
- ) - vmwareImcConfigFilePath = None - else: - LOG.debug("Did not find VMware Customization Config File") - - use_raw_data = bool(vmwareImcConfigFilePath and md_path) - if use_raw_data: - set_gc_status(self._vmware_cust_conf, "Started") - LOG.debug("Start to load cloud-init meta data and user data") - try: - (md, ud, cfg, network) = load_cloudinit_data(md_path, ud_path) - - if network: - self._network_config = network - else: - self._network_config = ( - self.distro.generate_fallback_config() - ) - - except safeyaml.YAMLError as e: - _raise_error_status( - "Error parsing the cloud-init meta data", - e, - GuestCustErrorEnum.GUESTCUST_ERROR_WRONG_META_FORMAT, - vmwareImcConfigFilePath, - self._vmware_cust_conf, - ) - except Exception as e: - _raise_error_status( - "Error loading cloud-init configuration", - e, - GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, - vmwareImcConfigFilePath, - self._vmware_cust_conf, - ) - - self._vmware_cust_found = True - found.append("vmware-tools") - - util.del_dir(imcdirpath) - set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_DONE, - GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS, - ) - set_gc_status(self._vmware_cust_conf, "Successful") - - elif vmwareImcConfigFilePath: - # Load configuration from vmware_imc - self._vmware_nics_to_enable = "" - try: - set_gc_status(self._vmware_cust_conf, "Started") - - (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf) - self._vmware_nics_to_enable = get_nics_to_enable(nicspath) - product_marker = self._vmware_cust_conf.marker_id - hasmarkerfile = check_marker_exists( - product_marker, os.path.join(self.paths.cloud_dir, "data") - ) - special_customization = product_marker and not hasmarkerfile - customscript = self._vmware_cust_conf.custom_script_name - - # In case there is a custom script, check whether VMware - # Tools configuration allow the custom script to run. 
- if special_customization and customscript: - defVal = "false" - if self._vmware_cust_conf.default_run_post_script: - LOG.debug( - "Set default value to true due to" - " customization configuration." - ) - defVal = "true" - - custScriptConfig = get_tools_config( - CONFGROUPNAME_GUESTCUSTOMIZATION, - GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS, - defVal, - ) - if custScriptConfig.lower() != "true": - # Update the customization status if custom script - # is disabled - msg = "Custom script is disabled by VM Administrator" - LOG.debug(msg) - set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, - GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED, - ) - raise GuestCustScriptDisabled(msg) - - ccScriptsDir = os.path.join( - self.paths.get_cpath("scripts"), "per-instance" - ) - except GuestCustScriptDisabled as e: - LOG.debug("GuestCustScriptDisabled") - _raise_error_status( - "Error parsing the customization Config File", - e, - GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED, - vmwareImcConfigFilePath, - self._vmware_cust_conf, - ) - except Exception as e: - _raise_error_status( - "Error parsing the customization Config File", - e, - GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, - vmwareImcConfigFilePath, - self._vmware_cust_conf, - ) - - if special_customization: - if customscript: - try: - precust = PreCustomScript(customscript, imcdirpath) - precust.execute() - except Exception as e: - _raise_error_status( - "Error executing pre-customization script", - e, - GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, - vmwareImcConfigFilePath, - self._vmware_cust_conf, - ) - - try: - LOG.debug("Preparing the Network configuration") - self._network_config = get_network_config_from_conf( - self._vmware_cust_conf, True, True, self.distro.osfamily - ) - except Exception as e: - _raise_error_status( - "Error preparing Network Configuration", - e, - GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED, - vmwareImcConfigFilePath, - self._vmware_cust_conf, - ) - - 
if special_customization: - LOG.debug("Applying password customization") - pwdConfigurator = PasswordConfigurator() - adminpwd = self._vmware_cust_conf.admin_password - try: - resetpwd = self._vmware_cust_conf.reset_password - if adminpwd or resetpwd: - pwdConfigurator.configure( - adminpwd, resetpwd, self.distro - ) - else: - LOG.debug("Changing password is not needed") - except Exception as e: - _raise_error_status( - "Error applying Password Configuration", - e, - GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, - vmwareImcConfigFilePath, - self._vmware_cust_conf, - ) - - if customscript: - try: - postcust = PostCustomScript( - customscript, imcdirpath, ccScriptsDir - ) - postcust.execute() - except Exception as e: - _raise_error_status( - "Error executing post-customization script", - e, - GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, - vmwareImcConfigFilePath, - self._vmware_cust_conf, - ) - - if product_marker: - try: - setup_marker_files( - product_marker, - os.path.join(self.paths.cloud_dir, "data"), - ) - except Exception as e: - _raise_error_status( - "Error creating marker files", - e, - GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, - vmwareImcConfigFilePath, - self._vmware_cust_conf, - ) - - self._vmware_cust_found = True - found.append("vmware-tools") - - # TODO: Need to set the status to DONE only when the - # customization is done successfully. 
- util.del_dir(os.path.dirname(vmwareImcConfigFilePath)) - enable_nics(self._vmware_nics_to_enable) - set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_DONE, - GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS, - ) - set_gc_status(self._vmware_cust_conf, "Successful") - else: np = [ ("com.vmware.guestInfo", transport_vmware_guestinfo), @@ -438,9 +103,6 @@ class DataSourceOVF(sources.DataSource): return True def _get_subplatform(self): - system_type = dmi.read_dmi_data("system-product-name").lower() - if system_type == "vmware": - return "vmware (%s)" % self.seed return "ovf (%s)" % self.seed def get_public_ssh_keys(self): @@ -468,94 +130,6 @@ class DataSourceOVFNet(DataSourceOVF): DataSourceOVF.__init__(self, sys_cfg, distro, paths) self.seed_dir = os.path.join(paths.seed_dir, "ovf-net") self.supported_seed_starts = ("http://", "https://") - self.vmware_customization_supported = False - - -def get_max_wait_from_cfg(cfg): - default_max_wait = 15 - max_wait_cfg_option = "vmware_cust_file_max_wait" - max_wait = default_max_wait - - if not cfg: - return max_wait - - try: - max_wait = int(cfg.get(max_wait_cfg_option, default_max_wait)) - except ValueError: - LOG.warning( - "Failed to get '%s', using %s", - max_wait_cfg_option, - default_max_wait, - ) - - if max_wait < 0: - LOG.warning( - "Invalid value '%s' for '%s', using '%s' instead", - max_wait, - max_wait_cfg_option, - default_max_wait, - ) - max_wait = default_max_wait - - return max_wait - - -def wait_for_imc_cfg_file( - filename, maxwait=180, naplen=5, dirpath="/var/run/vmware-imc" -): - waited = 0 - if maxwait <= naplen: - naplen = 1 - - while waited < maxwait: - fileFullPath = os.path.join(dirpath, filename) - if os.path.isfile(fileFullPath): - return fileFullPath - LOG.debug("Waiting for VMware Customization Config File") - time.sleep(naplen) - waited += naplen - return None - - -def get_network_config_from_conf( - config, use_system_devices=True, configure=False, osfamily=None -): - nicConfigurator = 
NicConfigurator(config.nics, use_system_devices) - nics_cfg_list = nicConfigurator.generate(configure, osfamily) - - return get_network_config( - nics_cfg_list, config.name_servers, config.dns_suffixes - ) - - -def get_network_config(nics=None, nameservers=None, search=None): - config_list = nics - - if nameservers or search: - config_list.append( - {"type": "nameserver", "address": nameservers, "search": search} - ) - - return {"version": 1, "config": config_list} - - -# This will return a dict with some content -# meta-data, user-data, some config -def read_vmware_imc(config): - md = {} - cfg = {} - ud = None - if config.host_name: - if config.domain_name: - md["local-hostname"] = config.host_name + "." + config.domain_name - else: - md["local-hostname"] = config.host_name - - if config.timezone: - cfg["timezone"] = config.timezone - - md["instance-id"] = "iid-vmware-imc" - return (md, ud, cfg) # This will return a dict with some content @@ -745,17 +319,6 @@ def get_properties(contents): return props -def search_file(dirpath, filename): - if not dirpath or not filename: - return None - - for root, _dirs, files in os.walk(dirpath): - if filename in files: - return os.path.join(root, filename) - - return None - - class XmlError(Exception): pass @@ -772,80 +335,6 @@ def get_datasource_list(depends): return sources.list_from_depends(depends, datasources) -# To check if marker file exists -def check_marker_exists(markerid, marker_dir): - """ - Check the existence of a marker file. - Presence of marker file determines whether a certain code path is to be - executed. It is needed for partial guest customization in VMware. - @param markerid: is an unique string representing a particular product - marker. - @param: marker_dir: The directory in which markers exist. 
- """ - if not markerid: - return False - markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt") - if os.path.exists(markerfile): - return True - return False - - -# Create a marker file -def setup_marker_files(markerid, marker_dir): - """ - Create a new marker file. - Marker files are unique to a full customization workflow in VMware - environment. - @param markerid: is an unique string representing a particular product - marker. - @param: marker_dir: The directory in which markers exist. - - """ - LOG.debug("Handle marker creation") - markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt") - for fname in os.listdir(marker_dir): - if fname.startswith(".markerfile"): - util.del_file(os.path.join(marker_dir, fname)) - open(markerfile, "w").close() - - -def _raise_error_status(prefix, error, event, config_file, conf): - """ - Raise error and send customization status to the underlying VMware - Virtualization Platform. Also, cleanup the imc directory. - """ - LOG.debug("%s: %s", prefix, error) - set_customization_status(GuestCustStateEnum.GUESTCUST_STATE_RUNNING, event) - set_gc_status(conf, prefix) - util.del_dir(os.path.dirname(config_file)) - raise error - - -def load_cloudinit_data(md_path, ud_path): - """ - Load the cloud-init meta data, user data, cfg and network from the - given files - - @return: 4-tuple of configuration - metadata, userdata, cfg={}, network - - @raises: FileNotFoundError if md_path or ud_path are absent - """ - LOG.debug("load meta data from: %s: user data from: %s", md_path, ud_path) - md = {} - ud = None - network = None - - md = safeload_yaml_or_dict(util.load_file(md_path)) - - if "network" in md: - network = md["network"] - - if ud_path: - ud = util.load_file(ud_path).replace("\r", "") - return md, ud, {}, network - - def safeload_yaml_or_dict(data): """ The meta data could be JSON or YAML. 
Since YAML is a strict superset of @@ -857,47 +346,4 @@ def safeload_yaml_or_dict(data): return safeyaml.load(data) -def collect_imc_file_paths(cust_conf): - """ - collect all the other imc files. - - metadata is preferred to nics.txt configuration data. - - If metadata file exists because it is specified in customization - configuration, then metadata is required and userdata is optional. - - @return a 3-tuple containing desired configuration file paths if present - Expected returns: - 1. user provided metadata and userdata (md_path, ud_path, None) - 2. user provided metadata (md_path, None, None) - 3. user-provided network config (None, None, nics_path) - 4. No config found (None, None, None) - """ - md_path = None - ud_path = None - nics_path = None - md_file = cust_conf.meta_data_name - if md_file: - md_path = os.path.join(VMWARE_IMC_DIR, md_file) - if not os.path.exists(md_path): - raise FileNotFoundError( - "meta data file is not found: %s" % md_path - ) - - ud_file = cust_conf.user_data_name - if ud_file: - ud_path = os.path.join(VMWARE_IMC_DIR, ud_file) - if not os.path.exists(ud_path): - raise FileNotFoundError( - "user data file is not found: %s" % ud_path - ) - else: - nics_path = os.path.join(VMWARE_IMC_DIR, "nics.txt") - if not os.path.exists(nics_path): - LOG.debug("%s does not exist.", nics_path) - nics_path = None - - return md_path, ud_path, nics_path - - # vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceVMware.py b/cloudinit/sources/DataSourceVMware.py index 308e02e8..07a80222 100644 --- a/cloudinit/sources/DataSourceVMware.py +++ b/cloudinit/sources/DataSourceVMware.py @@ -1,9 +1,10 @@ # Cloud-Init DataSource for VMware # -# Copyright (c) 2018-2021 VMware, Inc. All Rights Reserved. +# Copyright (c) 2018-2022 VMware, Inc. All Rights Reserved. # # Authors: Anish Swaminathan # Andrew Kutz +# Pengpeng Sun # # This file is part of cloud-init. See LICENSE file for license information. 
@@ -14,6 +15,7 @@ multiple transports types, including: * EnvVars * GuestInfo + * IMC (Guest Customization) Netifaces (https://github.com/al45tair/netifaces) @@ -74,6 +76,7 @@ import netifaces from cloudinit import dmi from cloudinit import log as logging from cloudinit import net, sources, util +from cloudinit.sources.helpers.vmware.imc import guestcust_util from cloudinit.subp import ProcessExecutionError, subp, which PRODUCT_UUID_FILE_PATH = "/sys/class/dmi/id/product_uuid" @@ -81,8 +84,10 @@ PRODUCT_UUID_FILE_PATH = "/sys/class/dmi/id/product_uuid" LOG = logging.getLogger(__name__) NOVAL = "No value found" +# Data transports names DATA_ACCESS_METHOD_ENVVAR = "envvar" DATA_ACCESS_METHOD_GUESTINFO = "guestinfo" +DATA_ACCESS_METHOD_IMC = "imc" VMWARE_RPCTOOL = which("vmware-rpctool") REDACT = "redact" @@ -116,14 +121,22 @@ class DataSourceVMware(sources.DataSource): Network Config Version 2 - http://bit.ly/cloudinit-net-conf-v2 For example, CentOS 7's official cloud-init package is version - 0.7.9 and does not support Network Config Version 2. However, - this datasource still supports supplying Network Config Version 2 - data as long as the Linux distro's cloud-init package is new - enough to parse the data. + 0.7.9 and does not support Network Config Version 2. - The metadata key "network.encoding" may be used to indicate the - format of the metadata key "network". Valid encodings are base64 - and gzip+base64. + imc transport: + Either Network Config Version 1 or Network Config Version 2 is + supported which depends on the customization type. + For LinuxPrep customization, Network config Version 1 data is + parsed from the customization specification. + For CloudinitPrep customization, Network config Version 2 data + is parsed from the customization specification. + + envvar and guestinfo transports: + Network Config Version 2 data is supported as long as the Linux + distro's cloud-init package is new enough to parse the data. 
+ The metadata key "network.encoding" may be used to indicate the + format of the metadata key "network". Valid encodings are base64 + and gzip+base64. """ dsname = "VMware" @@ -131,9 +144,27 @@ class DataSourceVMware(sources.DataSource): def __init__(self, sys_cfg, distro, paths, ud_proc=None): sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc) + self.cfg = {} self.data_access_method = None self.vmware_rpctool = VMWARE_RPCTOOL + # A list includes all possible data transports, each tuple represents + # one data transport type. This datasource will try to get data from + # each of transports follows the tuples order in this list. + # A tuple has 3 elements which are: + # 1. The transport name + # 2. The function name to get data for the transport + # 3. A boolean tells whether the transport requires VMware platform + self.possible_data_access_method_list = [ + (DATA_ACCESS_METHOD_ENVVAR, self.get_envvar_data_fn, False), + (DATA_ACCESS_METHOD_GUESTINFO, self.get_guestinfo_data_fn, True), + (DATA_ACCESS_METHOD_IMC, self.get_imc_data_fn, True), + ] + + def __str__(self): + root = sources.DataSource.__str__(self) + return "%s [seed=%s]" % (root, self.data_access_method) + def _get_data(self): """ _get_data loads the metadata, userdata, and vendordata from one of @@ -141,6 +172,7 @@ class DataSourceVMware(sources.DataSource): * envvars * guestinfo + * imc Please note when updating this function with support for new data transports, the order should match the order in the dscheck_VMware @@ -152,35 +184,18 @@ class DataSourceVMware(sources.DataSource): # access method. md, ud, vd = None, None, None - # First check to see if there is data via env vars. 
- if os.environ.get(VMX_GUESTINFO, ""): - md = guestinfo_envvar("metadata") - ud = guestinfo_envvar("userdata") - vd = guestinfo_envvar("vendordata") - + # Crawl data from all possible data transports + for ( + data_access_method, + get_data_fn, + require_vmware_platform, + ) in self.possible_data_access_method_list: + if require_vmware_platform and not is_vmware_platform(): + continue + (md, ud, vd) = get_data_fn() if md or ud or vd: - self.data_access_method = DATA_ACCESS_METHOD_ENVVAR - - # At this point, all additional data transports are valid only on - # a VMware platform. - if not self.data_access_method: - system_type = dmi.read_dmi_data("system-product-name") - if system_type is None: - LOG.debug("No system-product-name found") - return False - if "vmware" not in system_type.lower(): - LOG.debug("Not a VMware platform") - return False - - # If no data was detected, check the guestinfo transport next. - if not self.data_access_method: - if self.vmware_rpctool: - md = guestinfo("metadata", self.vmware_rpctool) - ud = guestinfo("userdata", self.vmware_rpctool) - vd = guestinfo("vendordata", self.vmware_rpctool) - - if md or ud or vd: - self.data_access_method = DATA_ACCESS_METHOD_GUESTINFO + self.data_access_method = data_access_method + break if not self.data_access_method: LOG.error("failed to find a valid data access method") @@ -241,6 +256,8 @@ class DataSourceVMware(sources.DataSource): get_key_name_fn = get_guestinfo_envvar_key_name elif self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO: get_key_name_fn = get_guestinfo_key_name + elif self.data_access_method == DATA_ACCESS_METHOD_IMC: + get_key_name_fn = get_imc_key_name else: return sources.METADATA_UNKNOWN @@ -249,6 +266,12 @@ class DataSourceVMware(sources.DataSource): get_key_name_fn("metadata"), ) + # The data sources' config_obj is a cloud-config formatted + # object that came to it from ways other than cloud-config + # because cloud-config content would be handled elsewhere + def 
get_config_obj(self): + return self.cfg + @property def network_config(self): if "network" in self.metadata: @@ -292,6 +315,98 @@ class DataSourceVMware(sources.DataSource): if self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO: guestinfo_redact_keys(keys_to_redact, self.vmware_rpctool) + def get_envvar_data_fn(self): + """ + check to see if there is data via env vars + """ + md, ud, vd = None, None, None + if os.environ.get(VMX_GUESTINFO, ""): + md = guestinfo_envvar("metadata") + ud = guestinfo_envvar("userdata") + vd = guestinfo_envvar("vendordata") + + return (md, ud, vd) + + def get_guestinfo_data_fn(self): + """ + check to see if there is data via the guestinfo transport + """ + md, ud, vd = None, None, None + if self.vmware_rpctool: + md = guestinfo("metadata", self.vmware_rpctool) + ud = guestinfo("userdata", self.vmware_rpctool) + vd = guestinfo("vendordata", self.vmware_rpctool) + + return (md, ud, vd) + + def get_imc_data_fn(self): + """ + check to see if there is data via vmware guest customization + """ + md, ud, vd = None, None, None + + # Check if vmware guest customization is enabled. + allow_vmware_cust = guestcust_util.is_vmware_cust_enabled(self.sys_cfg) + allow_raw_data_cust = guestcust_util.is_raw_data_cust_enabled( + self.ds_cfg + ) + if not allow_vmware_cust and not allow_raw_data_cust: + LOG.debug("Customization for VMware platform is disabled") + return (md, ud, vd) + + # Check if "VMware Tools" plugin is available. + if not guestcust_util.is_cust_plugin_available(): + return (md, ud, vd) + + # Wait for vmware guest customization configuration file. + cust_cfg_file = guestcust_util.get_cust_cfg_file(self.ds_cfg) + if cust_cfg_file is None: + return (md, ud, vd) + + # Check what type of guest customization is this. 
+ cust_cfg_dir = os.path.dirname(cust_cfg_file) + cust_cfg = guestcust_util.parse_cust_cfg(cust_cfg_file) + ( + is_vmware_cust_cfg, + is_raw_data_cust_cfg, + ) = guestcust_util.get_cust_cfg_type(cust_cfg) + + # Get data only if guest customization type and flag matches. + if is_vmware_cust_cfg and allow_vmware_cust: + LOG.debug("Getting data via VMware customization configuration") + (md, ud, vd, self.cfg) = guestcust_util.get_data_from_imc_cust_cfg( + self.paths.cloud_dir, + self.paths.get_cpath("scripts"), + cust_cfg, + cust_cfg_dir, + self.distro, + ) + elif is_raw_data_cust_cfg and allow_raw_data_cust: + LOG.debug( + "Getting data via VMware raw cloudinit data " + "customization configuration" + ) + (md, ud, vd) = guestcust_util.get_data_from_imc_raw_data_cust_cfg( + cust_cfg + ) + else: + LOG.debug("No allowed customization configuration data found") + + # Clean customization configuration file and directory + util.del_dir(cust_cfg_dir) + return (md, ud, vd) + + +def is_vmware_platform(): + system_type = dmi.read_dmi_data("system-product-name") + if system_type is None: + LOG.debug("No system-product-name found") + return False + elif "vmware" not in system_type.lower(): + LOG.debug("Not a VMware platform") + return False + return True + def decode(key, enc_type, data): """ @@ -367,6 +482,10 @@ def handle_returned_guestinfo_val(key, val): return None +def get_imc_key_name(key): + return "vmware-tools" + + def get_guestinfo_key_name(key): return "guestinfo." + key @@ -512,6 +631,9 @@ def load_json_or_yaml(data): """ if not data: return {} + # If data is already a dictionary, here will return it directly. + if isinstance(data, dict): + return data try: return util.load_json(data) except (json.JSONDecodeError, TypeError): @@ -523,6 +645,8 @@ def process_metadata(data): process_metadata processes metadata and loads the optional network configuration. 
""" + if not data: + return {} network = None if "network" in data: network = data["network"] diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py index 8b2deb65..df9e5c4b 100644 --- a/cloudinit/sources/helpers/vmware/imc/config.py +++ b/cloudinit/sources/helpers/vmware/imc/config.py @@ -29,6 +29,7 @@ class Config: DEFAULT_RUN_POST_SCRIPT = "MISC|DEFAULT-RUN-POST-CUST-SCRIPT" CLOUDINIT_META_DATA = "CLOUDINIT|METADATA" CLOUDINIT_USER_DATA = "CLOUDINIT|USERDATA" + CLOUDINIT_INSTANCE_ID = "CLOUDINIT|INSTANCE-ID" def __init__(self, configFile): self._configFile = configFile @@ -142,5 +143,10 @@ class Config: """Return the name of cloud-init user data.""" return self._configFile.get(Config.CLOUDINIT_USER_DATA, None) + @property + def instance_id(self): + """Return instance id""" + return self._configFile.get(Config.CLOUDINIT_INSTANCE_ID, None) + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py index 5b5f02ca..6ffbae40 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py @@ -1,7 +1,8 @@ # Copyright (C) 2016 Canonical Ltd. -# Copyright (C) 2016 VMware Inc. +# Copyright (C) 2016-2022 VMware Inc. # # Author: Sankar Tanguturi +# Pengpeng Sun # # This file is part of cloud-init. See LICENSE file for license information. 
@@ -10,13 +11,20 @@ import os import re import time -from cloudinit import subp -from cloudinit.sources.helpers.vmware.imc.guestcust_event import ( - GuestCustEventEnum, -) -from cloudinit.sources.helpers.vmware.imc.guestcust_state import ( - GuestCustStateEnum, +from cloudinit import subp, util + +from .config import Config +from .config_custom_script import ( + CustomScriptNotFound, + PostCustomScript, + PreCustomScript, ) +from .config_file import ConfigFile +from .config_nic import NicConfigurator +from .config_passwd import PasswordConfigurator +from .guestcust_error import GuestCustErrorEnum +from .guestcust_event import GuestCustEventEnum +from .guestcust_state import GuestCustStateEnum logger = logging.getLogger(__name__) @@ -24,6 +32,11 @@ logger = logging.getLogger(__name__) CLOUDINIT_LOG_FILE = "/var/log/cloud-init.log" QUERY_NICS_SUPPORTED = "queryNicsSupported" NICS_STATUS_CONNECTED = "connected" +# Path to the VMware IMC directory +IMC_DIR_PATH = "/var/run/vmware-imc" +# Customization script configuration in tools conf +IMC_TOOLS_CONF_GROUPNAME = "deployPkg" +IMC_TOOLS_CONF_ENABLE_CUST_SCRIPTS = "enable-custom-scripts" # This will send a RPC command to the underlying @@ -183,4 +196,447 @@ def set_gc_status(config, gcMsg): return None +def get_imc_dir_path(): + return IMC_DIR_PATH + + +def get_data_from_imc_cust_cfg( + cloud_dir, + scripts_cpath, + cust_cfg, + cust_cfg_dir, + distro, +): + md, ud, vd, cfg = {}, None, None, {} + set_gc_status(cust_cfg, "Started") + (md, cfg) = get_non_network_data_from_vmware_cust_cfg(cust_cfg) + is_special_customization = check_markers(cloud_dir, cust_cfg) + if is_special_customization: + if not do_special_customization( + scripts_cpath, cust_cfg, cust_cfg_dir, distro + ): + return (None, None, None, None) + if not recheck_markers(cloud_dir, cust_cfg): + return (None, None, None, None) + try: + logger.debug("Preparing the Network configuration") + md["network"] = get_network_data_from_vmware_cust_cfg( + cust_cfg, True, 
True, distro.osfamily + ) + except Exception as e: + set_cust_error_status( + "Error preparing Network Configuration", + str(e), + GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED, + cust_cfg, + ) + return (None, None, None, None) + connect_nics(cust_cfg_dir) + set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_DONE, + GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS, + ) + set_gc_status(cust_cfg, "Successful") + return (md, ud, vd, cfg) + + +def get_data_from_imc_raw_data_cust_cfg(cust_cfg): + set_gc_status(cust_cfg, "Started") + md, ud, vd = None, None, None + md_file = cust_cfg.meta_data_name + if md_file: + md_path = os.path.join(get_imc_dir_path(), md_file) + if not os.path.exists(md_path): + set_cust_error_status( + "Error locating the cloud-init meta data file", + "Meta data file is not found: %s" % md_path, + GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + cust_cfg, + ) + return (None, None, None) + try: + md = util.load_file(md_path) + except Exception as e: + set_cust_error_status( + "Error loading cloud-init meta data file", + str(e), + GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + cust_cfg, + ) + return (None, None, None) + + ud_file = cust_cfg.user_data_name + if ud_file: + ud_path = os.path.join(get_imc_dir_path(), ud_file) + if not os.path.exists(ud_path): + set_cust_error_status( + "Error locating the cloud-init userdata file", + "Userdata file is not found: %s" % ud_path, + GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + cust_cfg, + ) + return (None, None, None) + try: + ud = util.load_file(ud_path).replace("\r", "") + except Exception as e: + set_cust_error_status( + "Error loading cloud-init userdata file", + str(e), + GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + cust_cfg, + ) + return (None, None, None) + + set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_DONE, + GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS, + ) + set_gc_status(cust_cfg, "Successful") + return (md, ud, vd) + + +def 
get_non_network_data_from_vmware_cust_cfg(cust_cfg): + md, cfg = {}, {} + if cust_cfg.host_name: + if cust_cfg.domain_name: + md["local-hostname"] = ( + cust_cfg.host_name + "." + cust_cfg.domain_name + ) + else: + md["local-hostname"] = cust_cfg.host_name + if cust_cfg.timezone: + cfg["timezone"] = cust_cfg.timezone + if cust_cfg.instance_id: + md["instance-id"] = cust_cfg.instance_id + return (md, cfg) + + +def get_network_data_from_vmware_cust_cfg( + cust_cfg, use_system_devices=True, configure=False, osfamily=None +): + nicConfigurator = NicConfigurator(cust_cfg.nics, use_system_devices) + nics_cfg_list = nicConfigurator.generate(configure, osfamily) + + return get_v1_network_config( + nics_cfg_list, cust_cfg.name_servers, cust_cfg.dns_suffixes + ) + + +def get_v1_network_config(nics_cfg_list=None, nameservers=None, search=None): + config_list = nics_cfg_list + + if nameservers or search: + config_list.append( + {"type": "nameserver", "address": nameservers, "search": search} + ) + + return {"version": 1, "config": config_list} + + +def connect_nics(cust_cfg_dir): + nics_file = os.path.join(cust_cfg_dir, "nics.txt") + if os.path.exists(nics_file): + logger.debug("%s file found, to connect nics", nics_file) + enable_nics(get_nics_to_enable(nics_file)) + + +def is_vmware_cust_enabled(sys_cfg): + return not util.get_cfg_option_bool( + sys_cfg, "disable_vmware_customization", True + ) + + +def is_raw_data_cust_enabled(ds_cfg): + return util.get_cfg_option_bool(ds_cfg, "allow_raw_data", True) + + +def get_cust_cfg_file(ds_cfg): + # When the VM is powered on, the "VMware Tools" daemon + # copies the customization specification file to + # /var/run/vmware-imc directory. cloud-init code needs + # to search for the file in that directory which indicates + # that required metadata and userdata files are now + # present. 
+ max_wait = get_max_wait_from_cfg(ds_cfg) + cust_cfg_file_path = util.log_time( + logfunc=logger.debug, + msg="Waiting for VMware customization configuration file", + func=wait_for_cust_cfg_file, + args=("cust.cfg", max_wait), + ) + if cust_cfg_file_path: + logger.debug( + "Found VMware customization configuration file at %s", + cust_cfg_file_path, + ) + return cust_cfg_file_path + else: + logger.debug("No VMware customization configuration file found") + return None + + +def wait_for_cust_cfg_file( + filename, maxwait=180, naplen=5, dirpath="/var/run/vmware-imc" +): + waited = 0 + if maxwait <= naplen: + naplen = 1 + + while waited < maxwait: + fileFullPath = os.path.join(dirpath, filename) + if os.path.isfile(fileFullPath): + return fileFullPath + logger.debug("Waiting for VMware customization configuration file") + time.sleep(naplen) + waited += naplen + return None + + +def get_max_wait_from_cfg(ds_cfg): + default_max_wait = 15 + max_wait_cfg_option = "vmware_cust_file_max_wait" + max_wait = default_max_wait + if not ds_cfg: + return max_wait + try: + max_wait = int(ds_cfg.get(max_wait_cfg_option, default_max_wait)) + except ValueError: + logger.warning( + "Failed to get '%s', using %s", + max_wait_cfg_option, + default_max_wait, + ) + if max_wait < 0: + logger.warning( + "Invalid value '%s' for '%s', using '%s' instead", + max_wait, + max_wait_cfg_option, + default_max_wait, + ) + max_wait = default_max_wait + return max_wait + + +def check_markers(cloud_dir, cust_cfg): + product_marker = cust_cfg.marker_id + has_marker_file = check_marker_exists( + product_marker, os.path.join(cloud_dir, "data") + ) + return product_marker and not has_marker_file + + +def check_marker_exists(markerid, marker_dir): + """ + Check the existence of a marker file. + Presence of marker file determines whether a certain code path is to be + executed. It is needed for partial guest customization in VMware. 
+ @param markerid: is an unique string representing a particular product + marker. + @param: marker_dir: The directory in which markers exist. + """ + if not markerid: + return False + markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt") + if os.path.exists(markerfile): + return True + return False + + +def recheck_markers(cloud_dir, cust_cfg): + product_marker = cust_cfg.marker_id + if product_marker: + if not create_marker_file(cloud_dir, cust_cfg): + return False + return True + + +def create_marker_file(cloud_dir, cust_cfg): + try: + setup_marker_files(cust_cfg.marker_id, os.path.join(cloud_dir, "data")) + except Exception as e: + set_cust_error_status( + "Error creating marker files", + str(e), + GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + cust_cfg, + ) + return False + return True + + +def setup_marker_files(marker_id, marker_dir): + """ + Create a new marker file. + Marker files are unique to a full customization workflow in VMware + environment. + @param marker_id: is an unique string representing a particular product + marker. + @param: marker_dir: The directory in which markers exist. 
+ """ + logger.debug("Handle marker creation") + marker_file = os.path.join(marker_dir, ".markerfile-" + marker_id + ".txt") + for fname in os.listdir(marker_dir): + if fname.startswith(".markerfile"): + util.del_file(os.path.join(marker_dir, fname)) + open(marker_file, "w").close() + + +def do_special_customization(scripts_cpath, cust_cfg, cust_cfg_dir, distro): + is_pre_custom_successful = False + is_password_custom_successful = False + is_post_custom_successful = False + is_custom_script_enabled = False + custom_script = cust_cfg.custom_script_name + if custom_script: + is_custom_script_enabled = check_custom_script_enablement(cust_cfg) + if is_custom_script_enabled: + is_pre_custom_successful = do_pre_custom_script( + cust_cfg, custom_script, cust_cfg_dir + ) + is_password_custom_successful = do_password_customization(cust_cfg, distro) + if custom_script and is_custom_script_enabled: + ccScriptsDir = os.path.join(scripts_cpath, "per-instance") + is_post_custom_successful = do_post_custom_script( + cust_cfg, custom_script, cust_cfg_dir, ccScriptsDir + ) + if custom_script: + return ( + is_pre_custom_successful + and is_password_custom_successful + and is_post_custom_successful + ) + return is_password_custom_successful + + +def do_pre_custom_script(cust_cfg, custom_script, cust_cfg_dir): + try: + precust = PreCustomScript(custom_script, cust_cfg_dir) + precust.execute() + except CustomScriptNotFound as e: + set_cust_error_status( + "Error executing pre-customization script", + str(e), + GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + cust_cfg, + ) + return False + return True + + +def do_post_custom_script(cust_cfg, custom_script, cust_cfg_dir, ccScriptsDir): + try: + postcust = PostCustomScript(custom_script, cust_cfg_dir, ccScriptsDir) + postcust.execute() + except CustomScriptNotFound as e: + set_cust_error_status( + "Error executing post-customization script", + str(e), + GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + cust_cfg, + ) + return 
False + return True + + +def check_custom_script_enablement(cust_cfg): + is_custom_script_enabled = False + default_value = "false" + if cust_cfg.default_run_post_script: + logger.debug( + "Set default value to true due to customization configuration." + ) + default_value = "true" + custom_script_enablement = get_tools_config( + IMC_TOOLS_CONF_GROUPNAME, + IMC_TOOLS_CONF_ENABLE_CUST_SCRIPTS, + default_value, + ) + if custom_script_enablement.lower() != "true": + set_cust_error_status( + "Custom script is disabled by VM Administrator", + "Error checking custom script enablement", + GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED, + cust_cfg, + ) + else: + is_custom_script_enabled = True + return is_custom_script_enabled + + +def do_password_customization(cust_cfg, distro): + logger.debug("Applying password customization") + pwdConfigurator = PasswordConfigurator() + admin_pwd = cust_cfg.admin_password + try: + reset_pwd = cust_cfg.reset_password + if admin_pwd or reset_pwd: + pwdConfigurator.configure(admin_pwd, reset_pwd, distro) + else: + logger.debug("Changing password is not needed") + except Exception as e: + set_cust_error_status( + "Error applying password configuration", + str(e), + GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + cust_cfg, + ) + return False + return True + + +def parse_cust_cfg(cfg_file): + return Config(ConfigFile(cfg_file)) + + +def get_cust_cfg_type(cust_cfg): + is_vmware_cust_cfg, is_raw_data_cust_cfg = False, False + if cust_cfg.meta_data_name: + is_raw_data_cust_cfg = True + logger.debug("raw cloudinit data cust cfg found") + else: + is_vmware_cust_cfg = True + logger.debug("vmware cust cfg found") + return (is_vmware_cust_cfg, is_raw_data_cust_cfg) + + +def is_cust_plugin_available(): + search_paths = ( + "/usr/lib/vmware-tools", + "/usr/lib64/vmware-tools", + "/usr/lib/open-vm-tools", + "/usr/lib64/open-vm-tools", + "/usr/lib/x86_64-linux-gnu/open-vm-tools", + "/usr/lib/aarch64-linux-gnu/open-vm-tools", + ) + cust_plugin = 
"libdeployPkgPlugin.so" + for path in search_paths: + cust_plugin_path = search_file(path, cust_plugin) + if cust_plugin_path: + logger.debug( + "Found the customization plugin at %s", cust_plugin_path + ) + return True + return False + + +def search_file(dirpath, filename): + if not dirpath or not filename: + return None + + for root, _dirs, files in os.walk(dirpath): + if filename in files: + return os.path.join(root, filename) + + return None + + +def set_cust_error_status(prefix, error, event, cust_cfg): + """ + Set customization status to the underlying VMware Virtualization Platform + """ + util.logexc(logger, "%s: %s", prefix, error) + set_customization_status(GuestCustStateEnum.GUESTCUST_STATE_RUNNING, event) + set_gc_status(cust_cfg, prefix) + + # vi: ts=4 expandtab diff --git a/doc/rtd/topics/datasources/ovf.rst b/doc/rtd/topics/datasources/ovf.rst index d6eb75da..9f248476 100644 --- a/doc/rtd/topics/datasources/ovf.rst +++ b/doc/rtd/topics/datasources/ovf.rst @@ -11,36 +11,4 @@ transport. For further information see a full working example in cloud-init's source code tree in doc/sources/ovf -Configuration -------------- -The following configuration can be set for the datasource in system -configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`). - -The settings that may be configured are: - - * disable_vmware_customization: disable or enable the vmware customization - based on vmware customization files. (default: True) - * allow_raw_data: enable or disable the vmware customization based on raw - cloud-init data including metadata and userdata. (default: True) - * vmware_cust_file_max_wait: the maximum amount of clock time in seconds that - should be spent waiting for vmware customization files. (default: 15) - - -On VMware platforms, VMTools use is required for OVF datasource configuration -settings as well as vCloud and vSphere admin configuration. 
User could change -the VMTools configuration options with command:: - - vmware-toolbox-cmd config set
- -The following VMTools configuration options affect cloud-init's behavior on a booted VM: - * a: [deploypkg] enable-custom-scripts - If this option is absent in VMTools configuration, the custom script is - disabled by default for security reasons. Some VMware products could - change this default behavior (for example: enabled by default) via - customization specification settings. - -VMWare admin can refer to (https://github.com/canonical/cloud-init/blob/main/cloudinit/sources/helpers/vmware/imc/config.py) and set the customization specification settings. - -For more information, see `VMware vSphere Product Documentation `_ and specific VMTools parameters consumed. - .. vi: textwidth=79 diff --git a/doc/rtd/topics/datasources/vmware.rst b/doc/rtd/topics/datasources/vmware.rst index de3de6af..b45ff3cc 100644 --- a/doc/rtd/topics/datasources/vmware.rst +++ b/doc/rtd/topics/datasources/vmware.rst @@ -7,6 +7,7 @@ This datasource is for use with systems running on a VMware platform such as vSphere and currently supports the following data transports: +* `Guest OS Customization `_ * `GuestInfo `_ keys Configuration @@ -14,6 +15,82 @@ Configuration The configuration method is dependent upon the transport: +Guest OS Customization +^^^^^^^^^^^^^^^^^^^^^^ + +The following configuration can be set for this datasource in cloud-init +configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`). + +System configuration: + +``disable_vmware_customization``: +true(disable) or false(enable) the vmware traditional Linux guest customization. Traditional Linux guest customization is customizing a Linux virtual machine with a `traditional Linux customization specification `_. Also set this configuration to false is required to make sure this datasource is found in ds-identify when using guest os customization transport. 
(default: true) + +Datasource configuration: + +``allow_raw_data``: +true(enable) or false(disable) the vmware customization using cloud-init +metadata and userdata directly. Since vSphere 7.0 Update 3, a user +can create a Linux customization specification with only cloud-init +metadata and userdata and apply this kind of specification to a virtual +machine; this datasource will parse the metadata and userdata and +configure the virtual machine with them. See `Guest Customization Using +cloud-init `_ (default: true) + +``vmware_cust_file_max_wait``: +The maximum amount of clock time in seconds that should be spent waiting +for vmware customization files. (default: 15) + +Configuration examples: + +1. Create a file /etc/cloud/cloud.cfg.d/99-vmware-guest-customization.cfg with + below content will enable vmware customization, and set the maximum time of + waiting for vmware customization file to 10 seconds: + +.. code-block:: yaml + + disable_vmware_customization: false + datasource: + VMware: + vmware_cust_file_max_wait: 10 + +2. Create a file /etc/cloud/cloud.cfg.d/99-vmware-guest-customization.cfg with + below content will enable vmware customization, but only try to apply a + traditional Linux guest customization configuration, and set the maximum time of + waiting for vmware customization file to 10 seconds: + +.. code-block:: yaml + + disable_vmware_customization: false + datasource: + VMware: + allow_raw_data: false + vmware_cust_file_max_wait: 10 + +VMware Tools configuration: + +`VMware Tools `_ is required for this datasource configuration +settings as well as vCloud and vSphere admin configuration. User could change +the VMware Tools configuration options with command: + +.. code-block:: shell + + vmware-toolbox-cmd config set
+ +The following VMware Tools configuration option affects this datasource's +behavior when applying customization configuration with custom script: + +``[deploypkg] enable-custom-scripts``: + +If this option is absent in VMware Tools configuration, the custom script is +disabled by default for security reasons. Some VMware products could change +this default behavior (for example: enabled by default) via customization +specification settings. + +VMware admin can refer to `customization configuration `_ and set the customization specification settings. + +For more information, see `VMware vSphere Product Documentation `_ and specific VMware Tools configuration options. + GuestInfo Keys ^^^^^^^^^^^^^^ @@ -45,8 +122,7 @@ All ``guestinfo.*.encoding`` values may be set to ``base64`` or Features -------- -This section reviews several features available in this datasource, regardless -of how the meta, user, and vendor data was discovered. +This section reviews several features available in this datasource. Instance data and lazy networks ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -132,8 +208,8 @@ The above command will result in output similar to the below JSON: } -Redacting sensitive information -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Redacting sensitive information (GuestInfo keys transport only) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Sometimes the cloud-init userdata might contain sensitive information, and it may be desirable to have the ``guestinfo.userdata`` key (or other guestinfo @@ -202,11 +278,11 @@ If either of the above values are true, then the datasource will sleep for a second, check the network status, and repeat until one or both addresses from the specified families are available. -Walkthrough ------------ +Walkthrough of GuestInfo Transport +--------------------------------------- The following series of steps is a demonstration on how to configure a VM with -this datasource: +this datasource using GuestInfo keys transport: #. 
Create the metadata file for the VM. Save the following YAML to a file named diff --git a/tests/data/vmware/cust-dhcp-2nic-instance-id.cfg b/tests/data/vmware/cust-dhcp-2nic-instance-id.cfg new file mode 100644 index 00000000..70a9d313 --- /dev/null +++ b/tests/data/vmware/cust-dhcp-2nic-instance-id.cfg @@ -0,0 +1,37 @@ +[NETWORK] +NETWORKING = yes +BOOTPROTO = dhcp +HOSTNAME = myhost1 +DOMAINNAME = eng.vmware.com + +[NIC-CONFIG] +NICS = NIC1,NIC2 + +[NIC1] +MACADDR = 00:50:56:a6:8c:08 +ONBOOT = yes +IPv4_MODE = BACKWARDS_COMPATIBLE +BOOTPROTO = dhcp + +[NIC2] +MACADDR = 00:50:56:a6:5a:de +ONBOOT = yes +IPv4_MODE = BACKWARDS_COMPATIBLE +BOOTPROTO = dhcp + +# some random comment + +[PASSWORD] +# secret +-PASS = c2VjcmV0Cg== + +[DNS] +DNSFROMDHCP=yes +SUFFIX|1 = eng.vmware.com + +[DATETIME] +TIMEZONE = Africa/Abidjan +UTC = yes + +[CLOUDINIT] +INSTANCE-ID = guest-os-customization-uuid diff --git a/tests/unittests/sources/test_ovf.py b/tests/unittests/sources/test_ovf.py index 1fbd564f..109d8889 100644 --- a/tests/unittests/sources/test_ovf.py +++ b/tests/unittests/sources/test_ovf.py @@ -5,19 +5,13 @@ # This file is part of cloud-init. See LICENSE file for license information. import base64 -import os from collections import OrderedDict from textwrap import dedent from cloudinit import subp, util from cloudinit.helpers import Paths -from cloudinit.safeyaml import YAMLError from cloudinit.sources import DataSourceOVF as dsovf -from cloudinit.sources.DataSourceOVF import GuestCustScriptDisabled -from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( - CustomScriptNotFound, -) -from tests.unittests.helpers import CiTestCase, mock, wrap_and_call +from tests.unittests.helpers import CiTestCase, mock MPATH = "cloudinit.sources.DataSourceOVF." 
@@ -203,34 +197,6 @@ class TestReadOvfEnv(CiTestCase): self.assertIsNone(ud) -class TestMarkerFiles(CiTestCase): - def setUp(self): - super(TestMarkerFiles, self).setUp() - self.tdir = self.tmp_dir() - - def test_false_when_markerid_none(self): - """Return False when markerid provided is None.""" - self.assertFalse( - dsovf.check_marker_exists(markerid=None, marker_dir=self.tdir) - ) - - def test_markerid_file_exist(self): - """Return False when markerid file path does not exist, - True otherwise.""" - self.assertFalse(dsovf.check_marker_exists("123", self.tdir)) - - marker_file = self.tmp_path(".markerfile-123.txt", self.tdir) - util.write_file(marker_file, "") - self.assertTrue(dsovf.check_marker_exists("123", self.tdir)) - - def test_marker_file_setup(self): - """Test creation of marker files.""" - markerfilepath = self.tmp_path(".markerfile-hi.txt", self.tdir) - self.assertFalse(os.path.exists(markerfilepath)) - dsovf.setup_marker_files(markerid="hi", marker_dir=self.tdir) - self.assertTrue(os.path.exists(markerfilepath)) - - class TestDatasourceOVF(CiTestCase): with_logs = True @@ -240,334 +206,8 @@ class TestDatasourceOVF(CiTestCase): self.datasource = dsovf.DataSourceOVF self.tdir = self.tmp_dir() - def test_get_data_false_on_none_dmi_data(self): - """When dmi for system-product-name is None, get_data returns False.""" - paths = Paths({"cloud_dir": self.tdir}) - ds = self.datasource(sys_cfg={}, distro={}, paths=paths) - retcode = wrap_and_call( - "cloudinit.sources.DataSourceOVF", - { - "dmi.read_dmi_data": None, - "transport_iso9660": NOT_FOUND, - "transport_vmware_guestinfo": NOT_FOUND, - }, - ds.get_data, - ) - self.assertFalse(retcode, "Expected False return from ds.get_data") - self.assertIn( - "DEBUG: No system-product-name found", self.logs.getvalue() - ) - - def test_get_data_vmware_customization_disabled(self): - """When vmware customization is disabled via sys_cfg and - allow_raw_data is disabled via ds_cfg, log a message. 
- """ - paths = Paths({"cloud_dir": self.tdir}) - ds = self.datasource( - sys_cfg={ - "disable_vmware_customization": True, - "datasource": {"OVF": {"allow_raw_data": False}}, - }, - distro={}, - paths=paths, - ) - conf_file = self.tmp_path("test-cust", self.tdir) - conf_content = dedent( - """\ - [MISC] - MARKER-ID = 12345345 - """ - ) - util.write_file(conf_file, conf_content) - retcode = wrap_and_call( - "cloudinit.sources.DataSourceOVF", - { - "dmi.read_dmi_data": "vmware", - "transport_iso9660": NOT_FOUND, - "transport_vmware_guestinfo": NOT_FOUND, - "util.del_dir": True, - "search_file": self.tdir, - "wait_for_imc_cfg_file": conf_file, - }, - ds.get_data, - ) - self.assertFalse(retcode, "Expected False return from ds.get_data") - self.assertIn( - "DEBUG: Customization for VMware platform is disabled.", - self.logs.getvalue(), - ) - - def test_get_data_vmware_customization_sys_cfg_disabled(self): - """When vmware customization is disabled via sys_cfg and - no meta data is found, log a message. 
- """ - paths = Paths({"cloud_dir": self.tdir}) - ds = self.datasource( - sys_cfg={ - "disable_vmware_customization": True, - "datasource": {"OVF": {"allow_raw_data": True}}, - }, - distro={}, - paths=paths, - ) - conf_file = self.tmp_path("test-cust", self.tdir) - conf_content = dedent( - """\ - [MISC] - MARKER-ID = 12345345 - """ - ) - util.write_file(conf_file, conf_content) - retcode = wrap_and_call( - "cloudinit.sources.DataSourceOVF", - { - "dmi.read_dmi_data": "vmware", - "transport_iso9660": NOT_FOUND, - "transport_vmware_guestinfo": NOT_FOUND, - "util.del_dir": True, - "search_file": self.tdir, - "wait_for_imc_cfg_file": conf_file, - }, - ds.get_data, - ) - self.assertFalse(retcode, "Expected False return from ds.get_data") - self.assertIn( - "DEBUG: Customization using VMware config is disabled.", - self.logs.getvalue(), - ) - - def test_get_data_allow_raw_data_disabled(self): - """When allow_raw_data is disabled via ds_cfg and - meta data is found, log a message. - """ - paths = Paths({"cloud_dir": self.tdir}) - ds = self.datasource( - sys_cfg={ - "disable_vmware_customization": False, - "datasource": {"OVF": {"allow_raw_data": False}}, - }, - distro={}, - paths=paths, - ) - - # Prepare the conf file - conf_file = self.tmp_path("test-cust", self.tdir) - conf_content = dedent( - """\ - [CLOUDINIT] - METADATA = test-meta - """ - ) - util.write_file(conf_file, conf_content) - # Prepare the meta data file - metadata_file = self.tmp_path("test-meta", self.tdir) - util.write_file(metadata_file, "This is meta data") - retcode = wrap_and_call( - "cloudinit.sources.DataSourceOVF", - { - "dmi.read_dmi_data": "vmware", - "transport_iso9660": NOT_FOUND, - "transport_vmware_guestinfo": NOT_FOUND, - "util.del_dir": True, - "search_file": self.tdir, - "wait_for_imc_cfg_file": conf_file, - "collect_imc_file_paths": [self.tdir + "/test-meta", "", ""], - }, - ds.get_data, - ) - self.assertFalse(retcode, "Expected False return from ds.get_data") - self.assertIn( - "DEBUG: 
Customization using raw data is disabled.", - self.logs.getvalue(), - ) - - def test_get_data_vmware_customization_enabled(self): - """When cloud-init workflow for vmware is enabled via sys_cfg log a - message. - """ - paths = Paths({"cloud_dir": self.tdir}) - ds = self.datasource( - sys_cfg={"disable_vmware_customization": False}, - distro={}, - paths=paths, - ) - conf_file = self.tmp_path("test-cust", self.tdir) - conf_content = dedent( - """\ - [CUSTOM-SCRIPT] - SCRIPT-NAME = test-script - [MISC] - MARKER-ID = 12345345 - """ - ) - util.write_file(conf_file, conf_content) - with mock.patch(MPATH + "get_tools_config", return_value="true"): - with self.assertRaises(CustomScriptNotFound) as context: - wrap_and_call( - "cloudinit.sources.DataSourceOVF", - { - "dmi.read_dmi_data": "vmware", - "util.del_dir": True, - "search_file": self.tdir, - "wait_for_imc_cfg_file": conf_file, - "get_nics_to_enable": "", - }, - ds.get_data, - ) - customscript = self.tmp_path("test-script", self.tdir) - self.assertIn( - "Script %s not found!!" % customscript, str(context.exception) - ) - - def test_get_data_cust_script_disabled(self): - """If custom script is disabled by VMware tools configuration, - raise a RuntimeError. 
- """ - paths = Paths({"cloud_dir": self.tdir}) - ds = self.datasource( - sys_cfg={"disable_vmware_customization": False}, - distro={}, - paths=paths, - ) - # Prepare the conf file - conf_file = self.tmp_path("test-cust", self.tdir) - conf_content = dedent( - """\ - [CUSTOM-SCRIPT] - SCRIPT-NAME = test-script - [MISC] - MARKER-ID = 12345346 - """ - ) - util.write_file(conf_file, conf_content) - # Prepare the custom sript - customscript = self.tmp_path("test-script", self.tdir) - util.write_file(customscript, "This is the post cust script") - - with mock.patch(MPATH + "get_tools_config", return_value="invalid"): - with mock.patch( - MPATH + "set_customization_status", return_value=("msg", b"") - ): - with self.assertRaises(GuestCustScriptDisabled) as context: - wrap_and_call( - "cloudinit.sources.DataSourceOVF", - { - "dmi.read_dmi_data": "vmware", - "util.del_dir": True, - "search_file": self.tdir, - "wait_for_imc_cfg_file": conf_file, - "get_nics_to_enable": "", - }, - ds.get_data, - ) - self.assertIn( - "Custom script is disabled by VM Administrator", - str(context.exception), - ) - - def test_get_data_cust_script_enabled(self): - """If custom script is enabled by VMware tools configuration, - execute the script. 
- """ - paths = Paths({"cloud_dir": self.tdir}) - ds = self.datasource( - sys_cfg={"disable_vmware_customization": False}, - distro={}, - paths=paths, - ) - # Prepare the conf file - conf_file = self.tmp_path("test-cust", self.tdir) - conf_content = dedent( - """\ - [CUSTOM-SCRIPT] - SCRIPT-NAME = test-script - [MISC] - MARKER-ID = 12345346 - """ - ) - util.write_file(conf_file, conf_content) - - # Mock custom script is enabled by return true when calling - # get_tools_config - with mock.patch(MPATH + "get_tools_config", return_value="true"): - with mock.patch( - MPATH + "set_customization_status", return_value=("msg", b"") - ): - with self.assertRaises(CustomScriptNotFound) as context: - wrap_and_call( - "cloudinit.sources.DataSourceOVF", - { - "dmi.read_dmi_data": "vmware", - "util.del_dir": True, - "search_file": self.tdir, - "wait_for_imc_cfg_file": conf_file, - "get_nics_to_enable": "", - }, - ds.get_data, - ) - # Verify custom script is trying to be executed - customscript = self.tmp_path("test-script", self.tdir) - self.assertIn( - "Script %s not found!!" 
% customscript, str(context.exception) - ) - - def test_get_data_force_run_post_script_is_yes(self): - """If DEFAULT-RUN-POST-CUST-SCRIPT is yes, custom script could run if - enable-custom-scripts is not defined in VM Tools configuration - """ - paths = Paths({"cloud_dir": self.tdir}) - ds = self.datasource( - sys_cfg={"disable_vmware_customization": False}, - distro={}, - paths=paths, - ) - # Prepare the conf file - conf_file = self.tmp_path("test-cust", self.tdir) - # set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that enable-custom-scripts - # default value is TRUE - conf_content = dedent( - """\ - [CUSTOM-SCRIPT] - SCRIPT-NAME = test-script - [MISC] - MARKER-ID = 12345346 - DEFAULT-RUN-POST-CUST-SCRIPT = yes - """ - ) - util.write_file(conf_file, conf_content) - - # Mock get_tools_config(section, key, defaultVal) to return - # defaultVal - def my_get_tools_config(*args, **kwargs): - return args[2] - - with mock.patch( - MPATH + "get_tools_config", side_effect=my_get_tools_config - ): - with mock.patch( - MPATH + "set_customization_status", return_value=("msg", b"") - ): - with self.assertRaises(CustomScriptNotFound) as context: - wrap_and_call( - "cloudinit.sources.DataSourceOVF", - { - "dmi.read_dmi_data": "vmware", - "util.del_dir": True, - "search_file": self.tdir, - "wait_for_imc_cfg_file": conf_file, - "get_nics_to_enable": "", - }, - ds.get_data, - ) - # Verify custom script still runs although it is - # disabled by VMware Tools - customscript = self.tmp_path("test-script", self.tdir) - self.assertIn( - "Script %s not found!!" 
% customscript, str(context.exception) - ) - - def test_get_data_non_vmware_seed_platform_info(self): - """Platform info properly reports when on non-vmware platforms.""" + def test_get_data_seed_dir(self): + """Platform info properly reports when getting data from seed dir.""" paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir}) # Write ovf-env.xml seed file seed_dir = self.tmp_path("seed", dir=self.tdir) @@ -577,37 +217,14 @@ class TestDatasourceOVF(CiTestCase): self.assertEqual("ovf", ds.cloud_name) self.assertEqual("ovf", ds.platform_type) - with mock.patch(MPATH + "dmi.read_dmi_data", return_value="!VMware"): - with mock.patch(MPATH + "transport_vmware_guestinfo") as m_guestd: - with mock.patch(MPATH + "transport_iso9660") as m_iso9660: - m_iso9660.return_value = NOT_FOUND - m_guestd.return_value = NOT_FOUND - self.assertTrue(ds.get_data()) - self.assertEqual( - "ovf (%s/seed/ovf-env.xml)" % self.tdir, ds.subplatform - ) - - def test_get_data_vmware_seed_platform_info(self): - """Platform info properly reports when on VMware platform.""" - paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir}) - # Write ovf-env.xml seed file - seed_dir = self.tmp_path("seed", dir=self.tdir) - ovf_env = self.tmp_path("ovf-env.xml", dir=seed_dir) - util.write_file(ovf_env, OVF_ENV_CONTENT) - ds = self.datasource(sys_cfg={}, distro={}, paths=paths) - - self.assertEqual("ovf", ds.cloud_name) - self.assertEqual("ovf", ds.platform_type) - with mock.patch(MPATH + "dmi.read_dmi_data", return_value="VMWare"): - with mock.patch(MPATH + "transport_vmware_guestinfo") as m_guestd: - with mock.patch(MPATH + "transport_iso9660") as m_iso9660: - m_iso9660.return_value = NOT_FOUND - m_guestd.return_value = NOT_FOUND - self.assertTrue(ds.get_data()) - self.assertEqual( - "vmware (%s/seed/ovf-env.xml)" % self.tdir, - ds.subplatform, - ) + with mock.patch(MPATH + "transport_vmware_guestinfo") as m_guestd: + with mock.patch(MPATH + "transport_iso9660") as m_iso9660: + 
m_iso9660.return_value = NOT_FOUND + m_guestd.return_value = NOT_FOUND + self.assertTrue(ds.get_data()) + self.assertEqual( + "ovf (%s/seed/ovf-env.xml)" % self.tdir, ds.subplatform + ) @mock.patch("cloudinit.subp.subp") @mock.patch("cloudinit.sources.DataSource.persist_instance_data") @@ -679,346 +296,6 @@ class TestDatasourceOVF(CiTestCase): ds.network_config, ) - def test_get_data_cloudinit_metadata_json(self): - """Test metadata can be loaded to cloud-init metadata and network. - The metadata format is json. - """ - paths = Paths({"cloud_dir": self.tdir}) - ds = self.datasource( - sys_cfg={"disable_vmware_customization": True}, - distro={}, - paths=paths, - ) - # Prepare the conf file - conf_file = self.tmp_path("test-cust", self.tdir) - conf_content = dedent( - """\ - [CLOUDINIT] - METADATA = test-meta - """ - ) - util.write_file(conf_file, conf_content) - # Prepare the meta data file - metadata_file = self.tmp_path("test-meta", self.tdir) - metadata_content = dedent( - """\ - { - "instance-id": "cloud-vm", - "local-hostname": "my-host.domain.com", - "network": { - "version": 2, - "ethernets": { - "eths": { - "match": { - "name": "ens*" - }, - "dhcp4": true - } - } - } - } - """ - ) - util.write_file(metadata_file, metadata_content) - - with mock.patch( - MPATH + "set_customization_status", return_value=("msg", b"") - ): - result = wrap_and_call( - "cloudinit.sources.DataSourceOVF", - { - "dmi.read_dmi_data": "vmware", - "util.del_dir": True, - "search_file": self.tdir, - "wait_for_imc_cfg_file": conf_file, - "collect_imc_file_paths": [ - self.tdir + "/test-meta", - "", - "", - ], - "get_nics_to_enable": "", - }, - ds._get_data, - ) - - self.assertTrue(result) - self.assertEqual("cloud-vm", ds.metadata["instance-id"]) - self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"]) - self.assertEqual(2, ds.network_config["version"]) - self.assertTrue(ds.network_config["ethernets"]["eths"]["dhcp4"]) - - def test_get_data_cloudinit_metadata_yaml(self): - 
"""Test metadata can be loaded to cloud-init metadata and network. - The metadata format is yaml. - """ - paths = Paths({"cloud_dir": self.tdir}) - ds = self.datasource( - sys_cfg={"disable_vmware_customization": True}, - distro={}, - paths=paths, - ) - # Prepare the conf file - conf_file = self.tmp_path("test-cust", self.tdir) - conf_content = dedent( - """\ - [CLOUDINIT] - METADATA = test-meta - """ - ) - util.write_file(conf_file, conf_content) - # Prepare the meta data file - metadata_file = self.tmp_path("test-meta", self.tdir) - metadata_content = dedent( - """\ - instance-id: cloud-vm - local-hostname: my-host.domain.com - network: - version: 2 - ethernets: - nics: - match: - name: ens* - dhcp4: yes - """ - ) - util.write_file(metadata_file, metadata_content) - - with mock.patch( - MPATH + "set_customization_status", return_value=("msg", b"") - ): - result = wrap_and_call( - "cloudinit.sources.DataSourceOVF", - { - "dmi.read_dmi_data": "vmware", - "util.del_dir": True, - "search_file": self.tdir, - "wait_for_imc_cfg_file": conf_file, - "collect_imc_file_paths": [ - self.tdir + "/test-meta", - "", - "", - ], - "get_nics_to_enable": "", - }, - ds._get_data, - ) - - self.assertTrue(result) - self.assertEqual("cloud-vm", ds.metadata["instance-id"]) - self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"]) - self.assertEqual(2, ds.network_config["version"]) - self.assertTrue(ds.network_config["ethernets"]["nics"]["dhcp4"]) - - def test_get_data_cloudinit_metadata_not_valid(self): - """Test metadata is not JSON or YAML format.""" - paths = Paths({"cloud_dir": self.tdir}) - ds = self.datasource( - sys_cfg={"disable_vmware_customization": True}, - distro={}, - paths=paths, - ) - - # Prepare the conf file - conf_file = self.tmp_path("test-cust", self.tdir) - conf_content = dedent( - """\ - [CLOUDINIT] - METADATA = test-meta - """ - ) - util.write_file(conf_file, conf_content) - - # Prepare the meta data file - metadata_file = self.tmp_path("test-meta", 
self.tdir) - metadata_content = "[This is not json or yaml format]a=b" - util.write_file(metadata_file, metadata_content) - - with mock.patch( - MPATH + "set_customization_status", return_value=("msg", b"") - ): - with self.assertRaises(YAMLError) as context: - wrap_and_call( - "cloudinit.sources.DataSourceOVF", - { - "dmi.read_dmi_data": "vmware", - "util.del_dir": True, - "search_file": self.tdir, - "wait_for_imc_cfg_file": conf_file, - "collect_imc_file_paths": [ - self.tdir + "/test-meta", - "", - "", - ], - "get_nics_to_enable": "", - }, - ds.get_data, - ) - - self.assertIn( - "expected '', but found ''", - str(context.exception), - ) - - def test_get_data_cloudinit_metadata_not_found(self): - """Test metadata file can't be found.""" - paths = Paths({"cloud_dir": self.tdir}) - ds = self.datasource( - sys_cfg={"disable_vmware_customization": True}, - distro={}, - paths=paths, - ) - # Prepare the conf file - conf_file = self.tmp_path("test-cust", self.tdir) - conf_content = dedent( - """\ - [CLOUDINIT] - METADATA = test-meta - """ - ) - util.write_file(conf_file, conf_content) - # Don't prepare the meta data file - - with mock.patch( - MPATH + "set_customization_status", return_value=("msg", b"") - ): - with self.assertRaises(FileNotFoundError) as context: - wrap_and_call( - "cloudinit.sources.DataSourceOVF", - { - "dmi.read_dmi_data": "vmware", - "util.del_dir": True, - "search_file": self.tdir, - "wait_for_imc_cfg_file": conf_file, - "get_nics_to_enable": "", - }, - ds.get_data, - ) - - self.assertIn("is not found", str(context.exception)) - - def test_get_data_cloudinit_userdata(self): - """Test user data can be loaded to cloud-init user data.""" - paths = Paths({"cloud_dir": self.tdir}) - ds = self.datasource( - sys_cfg={"disable_vmware_customization": False}, - distro={}, - paths=paths, - ) - - # Prepare the conf file - conf_file = self.tmp_path("test-cust", self.tdir) - conf_content = dedent( - """\ - [CLOUDINIT] - METADATA = test-meta - USERDATA = 
test-user - """ - ) - util.write_file(conf_file, conf_content) - - # Prepare the meta data file - metadata_file = self.tmp_path("test-meta", self.tdir) - metadata_content = dedent( - """\ - instance-id: cloud-vm - local-hostname: my-host.domain.com - network: - version: 2 - ethernets: - nics: - match: - name: ens* - dhcp4: yes - """ - ) - util.write_file(metadata_file, metadata_content) - - # Prepare the user data file - userdata_file = self.tmp_path("test-user", self.tdir) - userdata_content = "This is the user data" - util.write_file(userdata_file, userdata_content) - - with mock.patch( - MPATH + "set_customization_status", return_value=("msg", b"") - ): - result = wrap_and_call( - "cloudinit.sources.DataSourceOVF", - { - "dmi.read_dmi_data": "vmware", - "util.del_dir": True, - "search_file": self.tdir, - "wait_for_imc_cfg_file": conf_file, - "collect_imc_file_paths": [ - self.tdir + "/test-meta", - self.tdir + "/test-user", - "", - ], - "get_nics_to_enable": "", - }, - ds._get_data, - ) - - self.assertTrue(result) - self.assertEqual("cloud-vm", ds.metadata["instance-id"]) - self.assertEqual(userdata_content, ds.userdata_raw) - - def test_get_data_cloudinit_userdata_not_found(self): - """Test userdata file can't be found.""" - paths = Paths({"cloud_dir": self.tdir}) - ds = self.datasource( - sys_cfg={"disable_vmware_customization": True}, - distro={}, - paths=paths, - ) - - # Prepare the conf file - conf_file = self.tmp_path("test-cust", self.tdir) - conf_content = dedent( - """\ - [CLOUDINIT] - METADATA = test-meta - USERDATA = test-user - """ - ) - util.write_file(conf_file, conf_content) - - # Prepare the meta data file - metadata_file = self.tmp_path("test-meta", self.tdir) - metadata_content = dedent( - """\ - instance-id: cloud-vm - local-hostname: my-host.domain.com - network: - version: 2 - ethernets: - nics: - match: - name: ens* - dhcp4: yes - """ - ) - util.write_file(metadata_file, metadata_content) - - # Don't prepare the user data file - - with 
mock.patch( - MPATH + "set_customization_status", return_value=("msg", b"") - ): - with self.assertRaises(FileNotFoundError) as context: - wrap_and_call( - "cloudinit.sources.DataSourceOVF", - { - "dmi.read_dmi_data": "vmware", - "util.del_dir": True, - "search_file": self.tdir, - "wait_for_imc_cfg_file": conf_file, - "get_nics_to_enable": "", - }, - ds.get_data, - ) - - self.assertIn("is not found", str(context.exception)) - class TestTransportIso9660(CiTestCase): def setUp(self): diff --git a/tests/unittests/sources/test_vmware.py b/tests/unittests/sources/test_vmware.py index b3663b0a..4911e5bc 100644 --- a/tests/unittests/sources/test_vmware.py +++ b/tests/unittests/sources/test_vmware.py @@ -1,6 +1,7 @@ -# Copyright (c) 2021 VMware, Inc. All Rights Reserved. +# Copyright (c) 2021-2022 VMware, Inc. All Rights Reserved. # # Authors: Andrew Kutz +# Pengpeng Sun # # This file is part of cloud-init. See LICENSE file for license information. @@ -8,18 +9,22 @@ import base64 import gzip import os from contextlib import ExitStack +from textwrap import dedent import pytest -from cloudinit import dmi, helpers, safeyaml, settings +from cloudinit import dmi, helpers, safeyaml, settings, util from cloudinit.sources import DataSourceVMware +from cloudinit.sources.helpers.vmware.imc import guestcust_util from tests.unittests.helpers import ( CiTestCase, FilesystemMockingTestCase, mock, populate_dir, + wrap_and_call, ) +MPATH = "cloudinit.sources.DataSourceVMware." 
PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name" PRODUCT_NAME = "VMware7,1" PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB" @@ -490,6 +495,706 @@ class TestDataSourceVMwareGuestInfo_InvalidPlatform(FilesystemMockingTestCase): self.assertFalse(ret) +class TestDataSourceVMwareIMC(CiTestCase): + """ + Test the VMware Guest OS Customization transport + """ + + with_logs = True + + def setUp(self): + super(TestDataSourceVMwareIMC, self).setUp() + self.datasource = DataSourceVMware.DataSourceVMware + self.tdir = self.tmp_dir() + + def test_get_data_false_on_none_dmi_data(self): + """When dmi for system-product-name is None, get_data returns False.""" + paths = helpers.Paths({"cloud_dir": self.tdir}) + ds = self.datasource(sys_cfg={}, distro={}, paths=paths) + result = wrap_and_call( + "cloudinit.sources.DataSourceVMware", + { + "dmi.read_dmi_data": None, + }, + ds.get_data, + ) + self.assertFalse(result, "Expected False return from ds.get_data") + self.assertIn("No system-product-name found", self.logs.getvalue()) + + def test_get_imc_data_vmware_customization_disabled(self): + """ + When vmware customization is disabled via sys_cfg and + allow_raw_data is disabled via ds_cfg, log a message. 
+ """ + paths = helpers.Paths({"cloud_dir": self.tdir}) + ds = self.datasource( + sys_cfg={ + "disable_vmware_customization": True, + "datasource": {"VMware": {"allow_raw_data": False}}, + }, + distro={}, + paths=paths, + ) + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ + [MISC] + MARKER-ID = 12345345 + """ + ) + util.write_file(conf_file, conf_content) + result = wrap_and_call( + "cloudinit.sources.DataSourceVMware", + { + "dmi.read_dmi_data": "vmware", + }, + ds.get_imc_data_fn, + ) + self.assertEqual(result, (None, None, None)) + self.assertIn( + "Customization for VMware platform is disabled", + self.logs.getvalue(), + ) + + def test_get_imc_data_vmware_customization_sys_cfg_disabled(self): + """ + When vmware customization is disabled via sys_cfg and + no meta data is found, log a message. + """ + paths = helpers.Paths({"cloud_dir": self.tdir}) + ds = self.datasource( + sys_cfg={ + "disable_vmware_customization": True, + "datasource": {"VMware": {"allow_raw_data": True}}, + }, + distro={}, + paths=paths, + ) + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ + [MISC] + MARKER-ID = 12345345 + """ + ) + util.write_file(conf_file, conf_content) + result = wrap_and_call( + "cloudinit.sources.DataSourceVMware", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "guestcust_util.search_file": self.tdir, + "guestcust_util.wait_for_cust_cfg_file": conf_file, + }, + ds.get_imc_data_fn, + ) + self.assertEqual(result, (None, None, None)) + self.assertIn( + "No allowed customization configuration data found", + self.logs.getvalue(), + ) + + def test_get_imc_data_allow_raw_data_disabled(self): + """ + When allow_raw_data is disabled via ds_cfg and + meta data is found, log a message. 
+ """ + paths = helpers.Paths({"cloud_dir": self.tdir}) + ds = self.datasource( + sys_cfg={ + "disable_vmware_customization": False, + "datasource": {"VMware": {"allow_raw_data": False}}, + }, + distro={}, + paths=paths, + ) + + # Prepare the conf file + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ + [CLOUDINIT] + METADATA = test-meta + """ + ) + util.write_file(conf_file, conf_content) + result = wrap_and_call( + "cloudinit.sources.DataSourceVMware", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "guestcust_util.search_file": self.tdir, + "guestcust_util.wait_for_cust_cfg_file": conf_file, + }, + ds.get_imc_data_fn, + ) + self.assertEqual(result, (None, None, None)) + self.assertIn( + "No allowed customization configuration data found", + self.logs.getvalue(), + ) + + def test_get_imc_data_vmware_customization_enabled(self): + """ + When cloud-init workflow for vmware is enabled via sys_cfg log a + message. + """ + paths = helpers.Paths({"cloud_dir": self.tdir}) + ds = self.datasource( + sys_cfg={"disable_vmware_customization": False}, + distro={}, + paths=paths, + ) + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345345 + """ + ) + util.write_file(conf_file, conf_content) + with mock.patch( + MPATH + "guestcust_util.get_tools_config", + return_value="true", + ): + result = wrap_and_call( + "cloudinit.sources.DataSourceVMware", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "guestcust_util.search_file": self.tdir, + "guestcust_util.wait_for_cust_cfg_file": conf_file, + }, + ds.get_imc_data_fn, + ) + self.assertEqual(result, (None, None, None)) + custom_script = self.tmp_path("test-script", self.tdir) + self.assertIn( + "Script %s not found!!" 
% custom_script, + self.logs.getvalue(), + ) + + def test_get_imc_data_cust_script_disabled(self): + """ + If custom script is disabled by VMware tools configuration, + log a message. + """ + paths = helpers.Paths({"cloud_dir": self.tdir}) + ds = self.datasource( + sys_cfg={"disable_vmware_customization": False}, + distro={}, + paths=paths, + ) + # Prepare the conf file + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345346 + """ + ) + util.write_file(conf_file, conf_content) + # Prepare the custom sript + customscript = self.tmp_path("test-script", self.tdir) + util.write_file(customscript, "This is the post cust script") + + with mock.patch( + MPATH + "guestcust_util.get_tools_config", + return_value="invalid", + ): + with mock.patch( + MPATH + "guestcust_util.set_customization_status", + return_value=("msg", b""), + ): + result = wrap_and_call( + "cloudinit.sources.DataSourceVMware", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "guestcust_util.search_file": self.tdir, + "guestcust_util.wait_for_cust_cfg_file": conf_file, + }, + ds.get_imc_data_fn, + ) + self.assertEqual(result, (None, None, None)) + self.assertIn( + "Custom script is disabled by VM Administrator", + self.logs.getvalue(), + ) + + def test_get_imc_data_cust_script_enabled(self): + """ + If custom script is enabled by VMware tools configuration, + execute the script. 
+ """ + paths = helpers.Paths({"cloud_dir": self.tdir}) + ds = self.datasource( + sys_cfg={"disable_vmware_customization": False}, + distro={}, + paths=paths, + ) + # Prepare the conf file + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345346 + """ + ) + util.write_file(conf_file, conf_content) + + # Mock custom script is enabled by return true when calling + # get_tools_config + with mock.patch( + MPATH + "guestcust_util.get_tools_config", + return_value="true", + ): + with mock.patch( + MPATH + "guestcust_util.set_customization_status", + return_value=("msg", b""), + ): + result = wrap_and_call( + "cloudinit.sources.DataSourceVMware", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "guestcust_util.search_file": self.tdir, + "guestcust_util.wait_for_cust_cfg_file": conf_file, + }, + ds.get_imc_data_fn, + ) + self.assertEqual(result, (None, None, None)) + # Verify custom script is trying to be executed + custom_script = self.tmp_path("test-script", self.tdir) + self.assertIn( + "Script %s not found!!" 
% custom_script, + self.logs.getvalue(), + ) + + def test_get_imc_data_force_run_post_script_is_yes(self): + """ + If DEFAULT-RUN-POST-CUST-SCRIPT is yes, custom script could run if + enable-custom-scripts is not defined in VM Tools configuration + """ + paths = helpers.Paths({"cloud_dir": self.tdir}) + ds = self.datasource( + sys_cfg={"disable_vmware_customization": False}, + distro={}, + paths=paths, + ) + # Prepare the conf file + conf_file = self.tmp_path("test-cust", self.tdir) + # set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that enable-custom-scripts + # default value is TRUE + conf_content = dedent( + """\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345346 + DEFAULT-RUN-POST-CUST-SCRIPT = yes + """ + ) + util.write_file(conf_file, conf_content) + + # Mock get_tools_config(section, key, defaultVal) to return + # defaultVal + def my_get_tools_config(*args, **kwargs): + return args[2] + + with mock.patch( + MPATH + "guestcust_util.get_tools_config", + side_effect=my_get_tools_config, + ): + with mock.patch( + MPATH + "guestcust_util.set_customization_status", + return_value=("msg", b""), + ): + result = wrap_and_call( + "cloudinit.sources.DataSourceVMware", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "guestcust_util.search_file": self.tdir, + "guestcust_util.wait_for_cust_cfg_file": conf_file, + }, + ds.get_imc_data_fn, + ) + self.assertEqual(result, (None, None, None)) + # Verify custom script still runs although it is + # disabled by VMware Tools + custom_script = self.tmp_path("test-script", self.tdir) + self.assertIn( + "Script %s not found!!" % custom_script, + self.logs.getvalue(), + ) + + def test_get_data_cloudinit_metadata_json(self): + """ + Test metadata can be loaded to cloud-init metadata and network. + The metadata format is json. 
+ """ + paths = helpers.Paths({"cloud_dir": self.tdir}) + ds = self.datasource( + sys_cfg={"disable_vmware_customization": True}, + distro={}, + paths=paths, + ) + # Prepare the conf file + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ + [CLOUDINIT] + METADATA = test-meta + """ + ) + util.write_file(conf_file, conf_content) + # Prepare the meta data file + metadata_file = self.tmp_path("test-meta", self.tdir) + metadata_content = dedent( + """\ + { + "instance-id": "cloud-vm", + "local-hostname": "my-host.domain.com", + "network": { + "version": 2, + "ethernets": { + "eths": { + "match": { + "name": "ens*" + }, + "dhcp4": true + } + } + } + } + """ + ) + util.write_file(metadata_file, metadata_content) + + with mock.patch( + MPATH + "guestcust_util.set_customization_status", + return_value=("msg", b""), + ): + result = wrap_and_call( + "cloudinit.sources.DataSourceVMware", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "guestcust_util.search_file": self.tdir, + "guestcust_util.wait_for_cust_cfg_file": conf_file, + "guestcust_util.get_imc_dir_path": self.tdir, + }, + ds._get_data, + ) + self.assertTrue(result) + self.assertEqual("cloud-vm", ds.metadata["instance-id"]) + self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"]) + self.assertEqual(2, ds.network_config["version"]) + self.assertTrue(ds.network_config["ethernets"]["eths"]["dhcp4"]) + + def test_get_data_cloudinit_metadata_yaml(self): + """ + Test metadata can be loaded to cloud-init metadata and network. + The metadata format is yaml. 
+ """ + paths = helpers.Paths({"cloud_dir": self.tdir}) + ds = self.datasource( + sys_cfg={"disable_vmware_customization": True}, + distro={}, + paths=paths, + ) + # Prepare the conf file + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ + [CLOUDINIT] + METADATA = test-meta + """ + ) + util.write_file(conf_file, conf_content) + # Prepare the meta data file + metadata_file = self.tmp_path("test-meta", self.tdir) + metadata_content = dedent( + """\ + instance-id: cloud-vm + local-hostname: my-host.domain.com + network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes + """ + ) + util.write_file(metadata_file, metadata_content) + + with mock.patch( + MPATH + "guestcust_util.set_customization_status", + return_value=("msg", b""), + ): + result = wrap_and_call( + "cloudinit.sources.DataSourceVMware", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "guestcust_util.search_file": self.tdir, + "guestcust_util.wait_for_cust_cfg_file": conf_file, + "guestcust_util.get_imc_dir_path": self.tdir, + }, + ds._get_data, + ) + self.assertTrue(result) + self.assertEqual("cloud-vm", ds.metadata["instance-id"]) + self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"]) + self.assertEqual(2, ds.network_config["version"]) + self.assertTrue(ds.network_config["ethernets"]["nics"]["dhcp4"]) + + def test_get_imc_data_cloudinit_metadata_not_valid(self): + """ + Test metadata is not JSON or YAML format, log a message + """ + paths = helpers.Paths({"cloud_dir": self.tdir}) + ds = self.datasource( + sys_cfg={"disable_vmware_customization": True}, + distro={}, + paths=paths, + ) + + # Prepare the conf file + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ + [CLOUDINIT] + METADATA = test-meta + """ + ) + util.write_file(conf_file, conf_content) + + # Prepare the meta data file + metadata_file = self.tmp_path("test-meta", self.tdir) + metadata_content = "[This is not json or yaml 
format]a=b" + util.write_file(metadata_file, metadata_content) + + with mock.patch( + MPATH + "guestcust_util.set_customization_status", + return_value=("msg", b""), + ): + result = wrap_and_call( + "cloudinit.sources.DataSourceVMware", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "guestcust_util.search_file": self.tdir, + "guestcust_util.wait_for_cust_cfg_file": conf_file, + "guestcust_util.get_imc_dir_path": self.tdir, + }, + ds.get_data, + ) + self.assertFalse(result) + self.assertIn( + "expected '', but found ''", + self.logs.getvalue(), + ) + + def test_get_imc_data_cloudinit_metadata_not_found(self): + """ + Test metadata file can't be found, log a message + """ + paths = helpers.Paths({"cloud_dir": self.tdir}) + ds = self.datasource( + sys_cfg={"disable_vmware_customization": True}, + distro={}, + paths=paths, + ) + # Prepare the conf file + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ + [CLOUDINIT] + METADATA = test-meta + """ + ) + util.write_file(conf_file, conf_content) + # Don't prepare the meta data file + + with mock.patch( + MPATH + "guestcust_util.set_customization_status", + return_value=("msg", b""), + ): + result = wrap_and_call( + "cloudinit.sources.DataSourceVMware", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "guestcust_util.search_file": self.tdir, + "guestcust_util.wait_for_cust_cfg_file": conf_file, + "guestcust_util.get_imc_dir_path": self.tdir, + }, + ds.get_imc_data_fn, + ) + self.assertEqual(result, (None, None, None)) + self.assertIn("Meta data file is not found", self.logs.getvalue()) + + def test_get_data_cloudinit_userdata(self): + """ + Test user data can be loaded to cloud-init user data. 
+ """ + paths = helpers.Paths({"cloud_dir": self.tdir}) + ds = self.datasource( + sys_cfg={"disable_vmware_customization": False}, + distro={}, + paths=paths, + ) + + # Prepare the conf file + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ + [CLOUDINIT] + METADATA = test-meta + USERDATA = test-user + """ + ) + util.write_file(conf_file, conf_content) + + # Prepare the meta data file + metadata_file = self.tmp_path("test-meta", self.tdir) + metadata_content = dedent( + """\ + instance-id: cloud-vm + local-hostname: my-host.domain.com + network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes + """ + ) + util.write_file(metadata_file, metadata_content) + + # Prepare the user data file + userdata_file = self.tmp_path("test-user", self.tdir) + userdata_content = "This is the user data" + util.write_file(userdata_file, userdata_content) + + with mock.patch( + MPATH + "guestcust_util.set_customization_status", + return_value=("msg", b""), + ): + result = wrap_and_call( + "cloudinit.sources.DataSourceVMware", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "guestcust_util.search_file": self.tdir, + "guestcust_util.wait_for_cust_cfg_file": conf_file, + "guestcust_util.get_imc_dir_path": self.tdir, + }, + ds._get_data, + ) + self.assertTrue(result) + self.assertEqual("cloud-vm", ds.metadata["instance-id"]) + self.assertEqual(userdata_content, ds.userdata_raw) + + def test_get_imc_data_cloudinit_userdata_not_found(self): + """ + Test userdata file can't be found. 
+ """ + paths = helpers.Paths({"cloud_dir": self.tdir}) + ds = self.datasource( + sys_cfg={"disable_vmware_customization": True}, + distro={}, + paths=paths, + ) + + # Prepare the conf file + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ + [CLOUDINIT] + METADATA = test-meta + USERDATA = test-user + """ + ) + util.write_file(conf_file, conf_content) + + # Prepare the meta data file + metadata_file = self.tmp_path("test-meta", self.tdir) + metadata_content = dedent( + """\ + instance-id: cloud-vm + local-hostname: my-host.domain.com + network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes + """ + ) + util.write_file(metadata_file, metadata_content) + + # Don't prepare the user data file + + with mock.patch( + MPATH + "guestcust_util.set_customization_status", + return_value=("msg", b""), + ): + result = wrap_and_call( + "cloudinit.sources.DataSourceVMware", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "guestcust_util.search_file": self.tdir, + "guestcust_util.wait_for_cust_cfg_file": conf_file, + "guestcust_util.get_imc_dir_path": self.tdir, + }, + ds.get_imc_data_fn, + ) + self.assertEqual(result, (None, None, None)) + self.assertIn("Userdata file is not found", self.logs.getvalue()) + + +class TestDataSourceVMwareIMC_MarkerFiles(CiTestCase): + def setUp(self): + super(TestDataSourceVMwareIMC_MarkerFiles, self).setUp() + self.tdir = self.tmp_dir() + + def test_false_when_markerid_none(self): + """Return False when markerid provided is None.""" + self.assertFalse( + guestcust_util.check_marker_exists( + markerid=None, marker_dir=self.tdir + ) + ) + + def test_markerid_file_exist(self): + """Return False when markerid file path does not exist, + True otherwise.""" + self.assertFalse(guestcust_util.check_marker_exists("123", self.tdir)) + marker_file = self.tmp_path(".markerfile-123.txt", self.tdir) + util.write_file(marker_file, "") + self.assertTrue(guestcust_util.check_marker_exists("123", 
self.tdir)) + + def test_marker_file_setup(self): + """Test creation of marker files.""" + markerfilepath = self.tmp_path(".markerfile-hi.txt", self.tdir) + self.assertFalse(os.path.exists(markerfilepath)) + guestcust_util.setup_marker_files(marker_id="hi", marker_dir=self.tdir) + self.assertTrue(os.path.exists(markerfilepath)) + + def assert_metadata(test_obj, ds, metadata): test_obj.assertEqual(metadata.get("instance-id"), ds.get_instance_id()) test_obj.assertEqual( diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py index 38d45d0e..2fc2e21c 100644 --- a/tests/unittests/sources/vmware/test_vmware_config_file.py +++ b/tests/unittests/sources/vmware/test_vmware_config_file.py @@ -1,5 +1,5 @@ # Copyright (C) 2015 Canonical Ltd. -# Copyright (C) 2016 VMware INC. +# Copyright (C) 2016-2022 VMware INC. # # Author: Sankar Tanguturi # Pengpeng Sun @@ -12,10 +12,6 @@ import sys import tempfile import textwrap -from cloudinit.sources.DataSourceOVF import ( - get_network_config_from_conf, - read_vmware_imc, -) from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum from cloudinit.sources.helpers.vmware.imc.config import Config from cloudinit.sources.helpers.vmware.imc.config_file import ( @@ -25,6 +21,10 @@ from cloudinit.sources.helpers.vmware.imc.config_nic import ( NicConfigurator, gen_subnet, ) +from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( + get_network_data_from_vmware_cust_cfg, + get_non_network_data_from_vmware_cust_cfg, +) from tests.unittests.helpers import CiTestCase, cloud_init_project_dir logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) @@ -63,23 +63,29 @@ class TestVmwareConfigFile(CiTestCase): self.assertFalse(cf.should_keep_current_value("BAR"), "keepBar") self.assertTrue(cf.should_remove_current_value("BAR"), "removeBar") - def test_datasource_instance_id(self): - """Tests instance id for the DatasourceOVF""" + def 
test_configfile_without_instance_id(self): + """ + Tests instance id is None when configuration file has no instance id + """ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) - instance_id_prefix = "iid-vmware-" + (md1, _) = get_non_network_data_from_vmware_cust_cfg(conf) + self.assertFalse("instance-id" in md1) - conf = Config(cf) + (md2, _) = get_non_network_data_from_vmware_cust_cfg(conf) + self.assertFalse("instance-id" in md2) - (md1, _, _) = read_vmware_imc(conf) - self.assertIn(instance_id_prefix, md1["instance-id"]) - self.assertEqual(md1["instance-id"], "iid-vmware-imc") + def test_configfile_with_instance_id(self): + """Tests instance id get from configuration file""" + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic-instance-id.cfg") + conf = Config(cf) - (md2, _, _) = read_vmware_imc(conf) - self.assertIn(instance_id_prefix, md2["instance-id"]) - self.assertEqual(md2["instance-id"], "iid-vmware-imc") + (md1, _) = get_non_network_data_from_vmware_cust_cfg(conf) + self.assertEqual(md1["instance-id"], conf.instance_id, "instance-id") - self.assertEqual(md2["instance-id"], md1["instance-id"]) + (md2, _) = get_non_network_data_from_vmware_cust_cfg(conf) + self.assertEqual(md2["instance-id"], conf.instance_id, "instance-id") def test_configfile_static_2nics(self): """Tests Config class for a configuration with two static NICs.""" @@ -166,7 +172,7 @@ class TestVmwareConfigFile(CiTestCase): config = Config(cf) - network_config = get_network_config_from_conf(config, False) + network_config = get_network_data_from_vmware_cust_cfg(config, False) self.assertEqual(1, network_config.get("version")) @@ -201,14 +207,14 @@ class TestVmwareConfigFile(CiTestCase): ) def test_get_config_dns_suffixes(self): - """Tests if get_network_config_from_conf properly + """Tests if get_network_from_vmware_cust_cfg properly generates nameservers and dns settings from a specified configuration""" cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") 
config = Config(cf) - network_config = get_network_config_from_conf(config, False) + network_config = get_network_data_from_vmware_cust_cfg(config, False) self.assertEqual(1, network_config.get("version")) diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index f4b9403d..8e3c28ac 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -622,51 +622,6 @@ class TestDsIdentify(DsIdentifyBase): """OVF guest info is found on vmware.""" self._test_ds_found("OVF-guestinfo") - def test_ovf_on_vmware_iso_found_when_vmware_customization(self): - """OVF is identified when vmware customization is enabled.""" - self._test_ds_found("OVF-vmware-customization") - - def test_ovf_on_vmware_iso_found_open_vm_tools_64(self): - """OVF is identified when open-vm-tools installed in /usr/lib64.""" - cust64 = copy.deepcopy(VALID_CFG["OVF-vmware-customization"]) - p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so" - open64 = "usr/lib64/open-vm-tools/plugins/vmsvc/libdeployPkgPlugin.so" - cust64["files"][open64] = cust64["files"][p32] - del cust64["files"][p32] - return self._check_via_dict( - cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE] - ) - - def test_ovf_on_vmware_iso_found_open_vm_tools_x86_64_linux_gnu(self): - """OVF is identified when open-vm-tools installed in - /usr/lib/x86_64-linux-gnu.""" - cust64 = copy.deepcopy(VALID_CFG["OVF-vmware-customization"]) - p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so" - x86 = ( - "usr/lib/x86_64-linux-gnu/open-vm-tools/plugins/vmsvc/" - "libdeployPkgPlugin.so" - ) - cust64["files"][x86] = cust64["files"][p32] - del cust64["files"][p32] - return self._check_via_dict( - cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE] - ) - - def test_ovf_on_vmware_iso_found_open_vm_tools_aarch64_linux_gnu(self): - """OVF is identified when open-vm-tools installed in - /usr/lib/aarch64-linux-gnu.""" - cust64 = 
copy.deepcopy(VALID_CFG["OVF-vmware-customization"]) - p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so" - aarch64 = ( - "usr/lib/aarch64-linux-gnu/open-vm-tools/plugins/vmsvc/" - "libdeployPkgPlugin.so" - ) - cust64["files"][aarch64] = cust64["files"][p32] - del cust64["files"][p32] - return self._check_via_dict( - cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE] - ) - def test_ovf_on_vmware_iso_found_by_cdrom_with_matching_fs_label(self): """OVF is identified by well-known iso9660 labels.""" ovf_cdrom_by_label = copy.deepcopy(VALID_CFG["OVF"]) @@ -832,6 +787,51 @@ class TestDsIdentify(DsIdentifyBase): """VMware: no valid transports""" self._test_ds_not_found("VMware-NoValidTransports") + def test_vmware_on_vmware_when_vmware_customization_is_enabled(self): + """VMware is identified when vmware customization is enabled.""" + self._test_ds_found("VMware-vmware-customization") + + def test_vmware_on_vmware_open_vm_tools_64(self): + """VMware is identified when open-vm-tools installed in /usr/lib64.""" + cust64 = copy.deepcopy(VALID_CFG["VMware-vmware-customization"]) + p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so" + open64 = "usr/lib64/open-vm-tools/plugins/vmsvc/libdeployPkgPlugin.so" + cust64["files"][open64] = cust64["files"][p32] + del cust64["files"][p32] + return self._check_via_dict( + cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE] + ) + + def test_vmware_on_vmware_open_vm_tools_x86_64_linux_gnu(self): + """VMware is identified when open-vm-tools installed in + /usr/lib/x86_64-linux-gnu.""" + cust64 = copy.deepcopy(VALID_CFG["VMware-vmware-customization"]) + p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so" + x86 = ( + "usr/lib/x86_64-linux-gnu/open-vm-tools/plugins/vmsvc/" + "libdeployPkgPlugin.so" + ) + cust64["files"][x86] = cust64["files"][p32] + del cust64["files"][p32] + return self._check_via_dict( + cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE] + ) + + def 
test_vmware_on_vmware_open_vm_tools_aarch64_linux_gnu(self): + """VMware is identified when open-vm-tools installed in + /usr/lib/aarch64-linux-gnu.""" + cust64 = copy.deepcopy(VALID_CFG["VMware-vmware-customization"]) + p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so" + aarch64 = ( + "usr/lib/aarch64-linux-gnu/open-vm-tools/plugins/vmsvc/" + "libdeployPkgPlugin.so" + ) + cust64["files"][aarch64] = cust64["files"][p32] + del cust64["files"][p32] + return self._check_via_dict( + cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE] + ) + def test_vmware_envvar_no_data(self): """VMware: envvar transport no data""" self._test_ds_not_found("VMware-EnvVar-NoData") @@ -1256,26 +1256,6 @@ VALID_CFG = { os.path.join(P_SEED_DIR, "ovf", "ovf-env.xml"): "present\n", }, }, - "OVF-vmware-customization": { - "ds": "OVF", - "mocks": [ - # Include a mockes iso9660 potential, even though content not ovf - { - "name": "blkid", - "ret": 0, - "out": blkid_out( - [{"DEVNAME": "sr0", "TYPE": "iso9660", "LABEL": ""}] - ), - }, - MOCK_VIRT_IS_VMWARE, - ], - "files": { - "dev/sr0": "no match", - # Setup vmware customization enabled - "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so": "here", - "etc/cloud/cloud.cfg": "disable_vmware_customization: false\n", - }, - }, "OVF": { "ds": "OVF", "mocks": [ @@ -1624,6 +1604,17 @@ VALID_CFG = { MOCK_VIRT_IS_VMWARE, ], }, + "VMware-vmware-customization": { + "ds": "VMware", + "mocks": [ + MOCK_VIRT_IS_VMWARE, + ], + "files": { + # Setup vmware customization enabled + "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so": "here", + "etc/cloud/cloud.cfg": "disable_vmware_customization: false\n", + }, + }, "VMware-EnvVar-NoData": { "ds": "VMware", "mocks": [ diff --git a/tools/ds-identify b/tools/ds-identify index 0b9f9a8a..cd07565d 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -926,7 +926,7 @@ dscheck_UpCloud() { return ${DS_NOT_FOUND} } -ovf_vmware_guest_customization() { +vmware_guest_customization() { # 
vmware guest customization # virt provider must be vmware @@ -1040,8 +1040,6 @@ dscheck_OVF() { has_ovf_cdrom && return "${DS_FOUND}" - ovf_vmware_guest_customization && return "${DS_FOUND}" - return ${DS_NOT_FOUND} } @@ -1466,6 +1464,7 @@ dscheck_VMware() { # # * envvars # * guestinfo + # * imc (VMware Guest Customization) # # Please note when updating this function with support for new data # transports, the order should match the order in the _get_data @@ -1499,6 +1498,10 @@ dscheck_VMware() { return "${DS_FOUND}" fi + # Activate the VMware datasource only if tools plugin is available and + # guest customization is enabled. + vmware_guest_customization && return "${DS_FOUND}" + return "${DS_NOT_FOUND}" } -- cgit v1.2.1 From 076b51b362402d821414ee329a90c6ecd5b8aa7a Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Wed, 16 Nov 2022 23:43:28 +0100 Subject: network: Deprecate gateway{4,6} keys in network config v2 (#1794) - Do not render gateway{4,6} when transforming from network config v1 to v2. - Issue a warning if gateway{4,6} is present in network config v2. This warning is not issued if a passthrough to netplan is performed. 
LP: #1992512 --- cloudinit/net/netplan.py | 30 ++- cloudinit/net/network_state.py | 15 ++ doc/rtd/topics/network-config-format-v2.rst | 2 + tests/unittests/distros/test_netconfig.py | 12 +- tests/unittests/net/test_network_state.py | 141 +++++++++++- tests/unittests/test_net.py | 342 ++++++++++++++++++++++++++-- 6 files changed, 494 insertions(+), 48 deletions(-) diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 32fb031c..28c08d6b 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -43,10 +43,10 @@ def _get_params_dict_by_match(config, match): ) -def _extract_addresses(config, entry, ifname, features=None): +def _extract_addresses(config: dict, entry: dict, ifname, features=None): """This method parse a cloudinit.net.network_state dictionary (config) and maps netstate keys/values into a dictionary (entry) to represent - netplan yaml. + netplan yaml. (config v1 -> netplan) An example config dictionary might look like: @@ -81,8 +81,10 @@ def _extract_addresses(config, entry, ifname, features=None): """ def _listify(obj, token=" "): - "Helper to convert strings to list of strings, handle single string" - if not obj or type(obj) not in [str]: + """ + Helper to convert strings to list of strings, handle single string + """ + if not obj or not isinstance(obj, str): return obj if token in obj: return obj.split(token) @@ -112,12 +114,12 @@ def _extract_addresses(config, entry, ifname, features=None): addr = "%s" % subnet.get("address") if "prefix" in subnet: addr += "/%d" % subnet.get("prefix") - if "gateway" in subnet and subnet.get("gateway"): - gateway = subnet.get("gateway") - if ":" in gateway: - entry.update({"gateway6": gateway}) - else: - entry.update({"gateway4": gateway}) + if subnet.get("gateway"): + new_route = { + "via": subnet.get("gateway"), + "to": "default", + } + routes.append(new_route) if "dns_nameservers" in subnet: nameservers += _listify(subnet.get("dns_nameservers", [])) if "dns_search" in subnet: @@ -302,7 
+304,7 @@ class Renderer(renderer.Renderer): "successfully for all devices." ) from last_exception - def _render_content(self, network_state: NetworkState): + def _render_content(self, network_state: NetworkState) -> str: # if content already in netplan format, pass it back if network_state.version == 2: @@ -328,11 +330,7 @@ class Renderer(renderer.Renderer): for config in network_state.iter_interfaces(): ifname = config.get("name") # filter None (but not False) entries up front - ifcfg = dict( - (key, value) - for (key, value) in config.items() - if value is not None - ) + ifcfg = dict(filter(lambda it: it[1] is not None, config.items())) if_type = ifcfg.get("type") if if_type == "physical": diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index dd2ff489..e0adb110 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -83,6 +83,16 @@ NET_CONFIG_TO_V2: Dict[str, Dict[str, Any]] = { } +def warn_deprecated_all_devices(dikt: dict) -> None: + """Warn about deprecations of v2 properties for all devices""" + if "gateway4" in dikt or "gateway6" in dikt: + LOG.warning( + "DEPRECATED: The use of `gateway4` and `gateway6` is" + " deprecated. 
For more info check out: " + "https://cloudinit.readthedocs.io/en/latest/topics/network-config-format-v2.html" # noqa: E501 + ) + + def from_state_file(state_file): state = util.read_conf(state_file) nsi = NetworkStateInterpreter() @@ -750,6 +760,8 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta): if key in cfg: phy_cmd[key] = cfg[key] + warn_deprecated_all_devices(cfg) + subnets = self._v2_to_v1_ipcfg(cfg) if len(subnets) > 0: phy_cmd.update({"subnets": subnets}) @@ -784,6 +796,7 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta): } if "mtu" in cfg: vlan_cmd["mtu"] = cfg["mtu"] + warn_deprecated_all_devices(cfg) subnets = self._v2_to_v1_ipcfg(cfg) if len(subnets) > 0: vlan_cmd.update({"subnets": subnets}) @@ -852,6 +865,8 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta): } if "mtu" in item_cfg: v1_cmd["mtu"] = item_cfg["mtu"] + + warn_deprecated_all_devices(item_cfg) subnets = self._v2_to_v1_ipcfg(item_cfg) if len(subnets) > 0: v1_cmd.update({"subnets": subnets}) diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst index 53274417..b3e1df27 100644 --- a/doc/rtd/topics/network-config-format-v2.rst +++ b/doc/rtd/topics/network-config-format-v2.rst @@ -233,6 +233,7 @@ Example: ``addresses: [192.168.14.2/24, 2001:1::1/64]`` **gateway4**: or **gateway6**: *<(scalar)>* +Deprecated, see `netplan#default-routes`_. Set default gateway for IPv4/6, for manual address configuration. This requires setting ``addresses`` too. Gateway IPs must be in a form recognized by ``inet_pton(3)`` @@ -572,5 +573,6 @@ This is a complex example which shows most available features: :: dhcp4: yes .. _netplan: https://netplan.io +.. _netplan#default-routes: https://netplan.io/reference#default-routes .. _netplan#dhcp-overrides: https://netplan.io/reference#dhcp-overrides .. 
vi: textwidth=79 diff --git a/tests/unittests/distros/test_netconfig.py b/tests/unittests/distros/test_netconfig.py index f17a5d21..e2694b09 100644 --- a/tests/unittests/distros/test_netconfig.py +++ b/tests/unittests/distros/test_netconfig.py @@ -190,7 +190,9 @@ network: eth0: addresses: - 192.168.1.5/24 - gateway4: 192.168.1.254 + routes: + - to: default + via: 192.168.1.254 eth1: dhcp4: true """ @@ -207,7 +209,9 @@ network: eth0: addresses: - 2607:f0d0:1002:0011::2/64 - gateway6: 2607:f0d0:1002:0011::1 + routes: + - to: default + via: 2607:f0d0:1002:0011::1 eth1: dhcp4: true """ @@ -976,7 +980,9 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase): eth0: addresses: - 192.168.1.5/24 - gateway4: 192.168.1.254 + routes: + - to: default + via: 192.168.1.254 eth1: dhcp4: true """ diff --git a/tests/unittests/net/test_network_state.py b/tests/unittests/net/test_network_state.py index 75d033dc..57a4436f 100644 --- a/tests/unittests/net/test_network_state.py +++ b/tests/unittests/net/test_network_state.py @@ -6,6 +6,8 @@ import pytest from cloudinit import safeyaml from cloudinit.net import network_state +from cloudinit.net.netplan import Renderer as NetplanRenderer +from cloudinit.net.renderers import NAME_TO_RENDERER from tests.unittests.helpers import CiTestCase netstate_path = "cloudinit.net.network_state" @@ -99,15 +101,140 @@ class TestNetworkStateParseConfig(CiTestCase): self.assertNotEqual(None, result) -class TestNetworkStateParseConfigV2(CiTestCase): - def test_version_2_ignores_renderer_key(self): +@mock.patch("cloudinit.net.network_state.get_interfaces_by_mac") +class TestNetworkStateParseConfigV2: + def test_version_2_ignores_renderer_key(self, m_get_interfaces_by_mac): ncfg = {"version": 2, "renderer": "networkd", "ethernets": {}} - with mock.patch("cloudinit.net.network_state.get_interfaces_by_mac"): - nsi = network_state.NetworkStateInterpreter( - version=ncfg["version"], config=ncfg + nsi = network_state.NetworkStateInterpreter( + 
version=ncfg["version"], config=ncfg + ) + nsi.parse_config(skip_broken=False) + assert ncfg == nsi.as_dict()["config"] + + @pytest.mark.parametrize( + "cfg", + [ + pytest.param( + """ + version: 2 + ethernets: + eth0: + addresses: + - 10.54.2.19/21 + - 2a00:1730:fff9:100::52/128 + {gateway4} + {gateway6} + match: + macaddress: 52:54:00:3f:fc:f7 + nameservers: + addresses: + - 10.52.1.1 + - 10.52.1.71 + - 2001:4860:4860::8888 + - 2001:4860:4860::8844 + set-name: eth0 + """, + id="ethernets", + ), + pytest.param( + """ + version: 2 + vlans: + encc000.2653: + id: 2653 + link: "encc000" + addresses: + - 10.54.2.19/21 + - 2a00:1730:fff9:100::52/128 + {gateway4} + {gateway6} + nameservers: + addresses: + - 10.52.1.1 + - 10.52.1.71 + - 2001:4860:4860::8888 + - 2001:4860:4860::8844 + """, + id="vlan", + ), + pytest.param( + """ + version: 2 + bonds: + bond0: + addresses: + - 10.54.2.19/21 + - 2a00:1730:fff9:100::52/128 + {gateway4} + {gateway6} + interfaces: + - enp0s0 + - enp0s1 + mtu: 1334 + parameters: {{}} + """, + id="bond", + ), + pytest.param( + """ + version: 2 + bridges: + bridge0: + addresses: + - 10.54.2.19/21 + - 2a00:1730:fff9:100::52/128 + {gateway4} + {gateway6} + interfaces: + - enp0s0 + - enp0s1 + parameters: {{}} + """, + id="bridge", + ), + ], + ) + @pytest.mark.parametrize( + "renderer_cls", + [ + pytest.param(None, id="non-netplan"), + ] + + [ + pytest.param(mod.Renderer, id=name) + for name, mod in NAME_TO_RENDERER.items() + ], + ) + def test_v2_warns_deprecated_gateways( + self, m_get_interfaces_by_mac, renderer_cls, cfg: str, caplog + ): + """ + Tests that a v2 netconf with the deprecated `gateway4` or `gateway6` + issues a warning about it only on non netplan targets. + + In netplan targets we perform a passthrough and the warning is not + needed. 
+ """ + ncfg = safeyaml.load( + cfg.format( + gateway4="gateway4: 10.54.0.1", + gateway6="gateway6: 2a00:1730:fff9:100::1", ) - nsi.parse_config(skip_broken=False) - self.assertEqual(ncfg, nsi.as_dict()["config"]) + ) + nsi = network_state.NetworkStateInterpreter( + version=ncfg["version"], + config=ncfg, + renderer=mock.MagicMock(spec=renderer_cls), + ) + nsi.parse_config(skip_broken=False) + assert ncfg == nsi.as_dict()["config"] + + if renderer_cls != NetplanRenderer: + count = 1 # Only one deprecation + else: + count = 0 # No deprecation as we passthrough + assert count == caplog.text.count( + "DEPRECATED: The use of `gateway4` and `gateway6` is" + ) class TestNetworkStateParseNameservers: diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 525706d1..90447315 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -8,6 +8,7 @@ import json import os import re import textwrap +from typing import Optional import pytest from yaml.serializer import Serializer @@ -379,7 +380,6 @@ network: bondM: addresses: - 10.101.10.47/23 - gateway4: 10.101.11.254 interfaces: - eno1 - eno3 @@ -401,6 +401,9 @@ network: mode: 802.3ad transmit-hash-policy: layer3+4 up-delay: 0 + routes: + - to: default + via: 10.101.11.254 vlans: bond0.3502: addresses: @@ -2247,7 +2250,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true addresses: - 192.168.0.2/24 - 192.168.2.10/24 - gateway4: 192.168.0.1 id: 101 link: eth0 macaddress: aa:bb:cc:dd:ee:11 @@ -2260,6 +2262,9 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true - barley.maas - sacchromyces.maas - brettanomyces.maas + routes: + - to: default + via: 192.168.0.1 """ ).rstrip(" "), "expected_sysconfig_opensuse": { @@ -2971,7 +2976,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true - 192.168.0.2/24 - 192.168.1.2/24 - 2001:1::1/92 - gateway4: 192.168.0.1 interfaces: - bond0s0 - bond0s1 @@ -2988,6 +2992,8 @@ pre-down route del -net 10.0.0.0/8 gw 
11.0.0.1 metric 3 || true transmit-hash-policy: layer3+4 up-delay: 20 routes: + - to: default + via: 192.168.0.1 - to: 10.1.3.0/24 via: 192.168.0.3 - to: 2001:67c::/32 @@ -5993,26 +5999,330 @@ iface eth0 inet dhcp ) -class TestNetplanNetRendering(CiTestCase): +class TestNetplanNetRendering: + @pytest.mark.parametrize( + "network_cfg,expected", + [ + pytest.param( + None, + """ + network: + ethernets: + eth1000: + dhcp4: true + match: + macaddress: 07-1c-c6-75-a4-be + set-name: eth1000 + version: 2 + """, + id="default_generation", + ), + # Asserts a netconf v1 with a physical device and two gateways + # does not produce deprecated keys, `gateway{46}`, in Netplan v2 + pytest.param( + """ + version: 1 + config: + - type: physical + name: interface0 + mac_address: '00:11:22:33:44:55' + subnets: + - type: static + address: 192.168.23.14/27 + gateway: 192.168.23.1 + - type: static + address: 11.0.0.11/24 + gateway: 11.0.0.1 + """, + """ + network: + version: 2 + ethernets: + interface0: + addresses: + - 192.168.23.14/27 + - 11.0.0.11/24 + match: + macaddress: 00:11:22:33:44:55 + set-name: interface0 + routes: + - to: default + via: 192.168.23.1 + - to: default + via: 11.0.0.1 + """, + id="physical_gateway46", + ), + # Asserts a netconf v1 with a bond device and two gateways + # does not produce deprecated keys, `gateway{46}`, in Netplan v2 + pytest.param( + """ + version: 1 + config: + - type: bond + name: bond0 + bond_interfaces: + - eth0 + - eth1 + params: {} + subnets: + - type: static + address: 192.168.23.14/27 + gateway: 192.168.23.1 + - type: static + address: 11.0.0.11/24 + gateway: 11.0.0.1 + """, + """ + network: + version: 2 + bonds: + bond0: + addresses: + - 192.168.23.14/27 + - 11.0.0.11/24 + interfaces: + - eth0 + - eth1 + routes: + - to: default + via: 192.168.23.1 + - to: default + via: 11.0.0.1 + eth0: {} + eth1: {} + """, + id="bond_gateway46", + ), + # Asserts a netconf v1 with a bridge device and two gateways + # does not produce deprecated keys, 
`gateway{46}`, in Netplan v2 + pytest.param( + """ + version: 1 + config: + - type: bridge + name: bridge0 + bridge_interfaces: + - eth0 + params: {} + subnets: + - type: static + address: 192.168.23.14/27 + gateway: 192.168.23.1 + - type: static + address: 11.0.0.11/24 + gateway: 11.0.0.1 + """, + """ + network: + version: 2 + bridges: + bridge0: + addresses: + - 192.168.23.14/27 + - 11.0.0.11/24 + interfaces: + - eth0 + routes: + - to: default + via: 192.168.23.1 + - to: default + via: 11.0.0.1 + """, + id="bridge_gateway46", + ), + # Asserts a netconf v1 with a vlan device and two gateways + # does not produce deprecated keys, `gateway{46}`, in Netplan v2 + pytest.param( + """ + version: 1 + config: + - type: vlan + name: vlan0 + vlan_link: eth0 + vlan_id: 101 + subnets: + - type: static + address: 192.168.23.14/27 + gateway: 192.168.23.1 + - type: static + address: 11.0.0.11/24 + gateway: 11.0.0.1 + """, + """ + network: + version: 2 + vlans: + vlan0: + addresses: + - 192.168.23.14/27 + - 11.0.0.11/24 + id: 101 + link: eth0 + routes: + - to: default + via: 192.168.23.1 + - to: default + via: 11.0.0.1 + """, + id="vlan_gateway46", + ), + # Asserts a netconf v1 with a nameserver device and two gateways + # does not produce deprecated keys, `gateway{46}`, in Netplan v2 + pytest.param( + """ + version: 1 + config: + - type: physical + name: interface0 + mac_address: '00:11:22:33:44:55' + subnets: + - type: static + address: 192.168.23.14/27 + gateway: 192.168.23.1 + - type: nameserver + address: + - 192.168.23.14/27 + - 11.0.0.11/24 + search: + - exemplary + subnets: + - type: static + address: 192.168.23.14/27 + gateway: 192.168.23.1 + - type: static + address: 11.0.0.11/24 + gateway: 11.0.0.1 + """, + """ + network: + version: 2 + ethernets: + interface0: + addresses: + - 192.168.23.14/27 + match: + macaddress: 00:11:22:33:44:55 + nameservers: + addresses: + - 192.168.23.14/27 + - 11.0.0.11/24 + search: + - exemplary + set-name: interface0 + routes: + - to: 
default + via: 192.168.23.1 + """, + id="nameserver_gateway4", + ), + # Asserts a netconf v1 with two subnets with two gateways does + # not clash + pytest.param( + """ + version: 1 + config: + - type: physical + name: interface0 + mac_address: '00:11:22:33:44:55' + subnets: + - type: static + address: 192.168.23.14/24 + gateway: 192.168.23.1 + - type: static + address: 10.184.225.122 + routes: + - network: 10.176.0.0 + gateway: 10.184.225.121 + """, + """ + network: + version: 2 + ethernets: + interface0: + addresses: + - 192.168.23.14/24 + - 10.184.225.122/24 + match: + macaddress: 00:11:22:33:44:55 + routes: + - to: default + via: 192.168.23.1 + - to: 10.176.0.0/24 + via: 10.184.225.121 + set-name: interface0 + """, + id="two_subnets_old_new_gateway46", + ), + # Asserts a netconf v1 with one subnet with two gateways does + # not clash + pytest.param( + """ + version: 1 + config: + - type: physical + name: interface0 + mac_address: '00:11:22:33:44:55' + subnets: + - type: static + address: 192.168.23.14/24 + gateway: 192.168.23.1 + routes: + - network: 192.167.225.122 + gateway: 192.168.23.1 + """, + """ + network: + version: 2 + ethernets: + interface0: + addresses: + - 192.168.23.14/24 + match: + macaddress: 00:11:22:33:44:55 + routes: + - to: default + via: 192.168.23.1 + - to: 192.167.225.122/24 + via: 192.168.23.1 + set-name: interface0 + """, + id="one_subnet_old_new_gateway46", + ), + ], + ) + @mock.patch( + "cloudinit.net.netplan.Renderer.features", + new_callable=mock.PropertyMock(return_value=[]), + ) @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") @mock.patch("cloudinit.net.netplan._clean_default") @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") - def test_default_generation( + def test_render( self, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path, mock_clean_default, m_get_cmdline, + m_renderer_features, + network_cfg: Optional[str], + 
expected: str, + tmpdir, ): - tmp_dir = self.tmp_dir() + tmp_dir = str(tmpdir) _setup_test( tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path ) - network_cfg = net.generate_fallback_config() + if network_cfg is None: + network_cfg = net.generate_fallback_config() + else: + network_cfg = yaml.load(network_cfg) + assert isinstance(network_cfg, dict) + ns = network_state.parse_net_config_data( network_cfg, skip_broken=False ) @@ -6026,25 +6336,13 @@ class TestNetplanNetRendering(CiTestCase): ) renderer.render_network_state(ns, target=render_dir) - self.assertTrue( - os.path.exists(os.path.join(render_dir, render_target)) - ) + assert os.path.exists(os.path.join(render_dir, render_target)) with open(os.path.join(render_dir, render_target)) as fh: contents = fh.read() print(contents) - expected = """ -network: - ethernets: - eth1000: - dhcp4: true - match: - macaddress: 07-1c-c6-75-a4-be - set-name: eth1000 - version: 2 -""" - self.assertEqual(expected.lstrip(), contents.lstrip()) - self.assertEqual(1, mock_clean_default.call_count) + assert yaml.load(expected) == yaml.load(contents) + assert 1, mock_clean_default.call_count class TestNetplanCleanDefault(CiTestCase): -- cgit v1.2.1 From 33eb9b13937f4956f7c7de96ca43e1991b1be813 Mon Sep 17 00:00:00 2001 From: sxt1001 Date: Thu, 17 Nov 2022 20:18:11 +0800 Subject: add xiaoge1001 to .github-cla-signers (#1854) --- tools/.github-cla-signers | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 2826b9d8..b107c078 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -127,6 +127,7 @@ Vultaire WebSpider wschoot xiachen-rh +xiaoge1001 xnox yangzz-97 yawkat -- cgit v1.2.1 From 25da3c774a39c12f004f14678d9c192f20aa44a1 Mon Sep 17 00:00:00 2001 From: Marvin Vogt Date: Thu, 17 Nov 2022 22:44:36 +0100 Subject: Add Support for IPv6 metadata to OpenStack (#1805) - Add openstack IPv6 metadata url fe80::a9fe:a9fe - Enable requesting multiple 
metadata sources in parallel This PR is very similar to #1160, reusing the provided `url_heper` logic. LP: #1906849 --- cloudinit/sources/DataSourceOpenStack.py | 7 ++++--- tools/.github-cla-signers | 1 + 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 915ed0c0..a07e355c 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -18,7 +18,7 @@ from cloudinit.sources.helpers import openstack LOG = logging.getLogger(__name__) # Various defaults/constants... -DEF_MD_URL = "http://169.254.169.254" +DEF_MD_URLS = ["http://[fe80::a9fe:a9fe]", "http://169.254.169.254"] DEFAULT_IID = "iid-dsopenstack" DEFAULT_METADATA = { "instance-id": DEFAULT_IID, @@ -74,7 +74,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): return mstr def wait_for_metadata_service(self): - urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL]) + urls = self.ds_cfg.get("metadata_urls", DEF_MD_URLS) filtered = [x for x in urls if util.is_resolvable_url(x)] if set(filtered) != set(urls): LOG.debug( @@ -85,7 +85,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): urls = filtered else: LOG.warning("Empty metadata url list! 
using default list") - urls = [DEF_MD_URL] + urls = DEF_MD_URLS md_urls = [] url2base = {} @@ -100,6 +100,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): urls=md_urls, max_wait=url_params.max_wait_seconds, timeout=url_params.timeout_seconds, + connect_synchronously=False, ) if avail_url: LOG.debug("Using metadata source: '%s'", url2base[avail_url]) diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index b107c078..5cb319d6 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -98,6 +98,7 @@ rmhsawyer rongz609 s-makin SadeghHayeri +SRv6d sarahwzadara scorpion44 shaardie -- cgit v1.2.1 From 3776a8812dc649d13cfc915d81fb4b9219dcaca8 Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Thu, 17 Nov 2022 22:53:05 +0100 Subject: doc: improve module creation explanation (#1851) Explain that the new module name must be included in jsonschema.$defs.all_modules to be validated within the base config. --- doc/rtd/topics/module_creation.rst | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/doc/rtd/topics/module_creation.rst b/doc/rtd/topics/module_creation.rst index 56cadec4..4a248a21 100644 --- a/doc/rtd/topics/module_creation.rst +++ b/doc/rtd/topics/module_creation.rst @@ -70,8 +70,12 @@ Guidelines module definition in ``/etc/cloud/cloud.cfg[.d]`` has been modified to pass arguments to this module. +* Your module name must be included in `schema-cloud-config.json`_ + under ``$defs.all_modules``, in order to be accepted as a module key in + the :ref:`base config`. + * If your module introduces any new cloud-config keys, you must provide a - schema definition in `cloud-init-schema.json`_. + schema definition in `schema-cloud-config.json`_. * The ``meta`` variable must exist and be of type `MetaSchema`_. * ``id``: The module id. In most cases this will be the filename without @@ -121,7 +125,7 @@ in the ``cloud_final_modules`` section before the ``final-message`` module. .. 
_MetaSchema: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/config/schema.py#L58 .. _OSFAMILIES: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/distros/__init__.py#L35 .. _settings.py: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/settings.py#L66 -.. _cloud-init-schema.json: https://github.com/canonical/cloud-init/blob/main/cloudinit/config/schemas/versions.schema.cloud-config.json +.. _schema-cloud-config.json: https://github.com/canonical/cloud-init/blob/main/cloudinit/config/schemas/schema-cloud-config-v1.json .. _cloud.cfg.tmpl: https://github.com/canonical/cloud-init/blob/main/config/cloud.cfg.tmpl .. _cloud_init_modules: https://github.com/canonical/cloud-init/blob/b4746b6aed7660510071395e70b2d6233fbdc3ab/config/cloud.cfg.tmpl#L70 .. _cloud_config_modules: https://github.com/canonical/cloud-init/blob/b4746b6aed7660510071395e70b2d6233fbdc3ab/config/cloud.cfg.tmpl#L101 -- cgit v1.2.1 From e2fb079a72402b59c5476d64d23331f7f1c5e945 Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Fri, 18 Nov 2022 15:55:44 +0100 Subject: doc: add how to render new module doc (#1855) --- doc/rtd/topics/module_creation.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/rtd/topics/module_creation.rst b/doc/rtd/topics/module_creation.rst index 4a248a21..69a6a8ae 100644 --- a/doc/rtd/topics/module_creation.rst +++ b/doc/rtd/topics/module_creation.rst @@ -74,6 +74,8 @@ Guidelines under ``$defs.all_modules``, in order to be accepted as a module key in the :ref:`base config`. +* Add the new module to `Module Reference`_. + * If your module introduces any new cloud-config keys, you must provide a schema definition in `schema-cloud-config.json`_. * The ``meta`` variable must exist and be of type `MetaSchema`_. @@ -130,3 +132,4 @@ in the ``cloud_final_modules`` section before the ``final-message`` module. .. 
_cloud_init_modules: https://github.com/canonical/cloud-init/blob/b4746b6aed7660510071395e70b2d6233fbdc3ab/config/cloud.cfg.tmpl#L70 .. _cloud_config_modules: https://github.com/canonical/cloud-init/blob/b4746b6aed7660510071395e70b2d6233fbdc3ab/config/cloud.cfg.tmpl#L101 .. _cloud_final_modules: https://github.com/canonical/cloud-init/blob/b4746b6aed7660510071395e70b2d6233fbdc3ab/config/cloud.cfg.tmpl#L144 +.. _Module Reference: https://github.com/canonical/cloud-init/blob/main/doc/rtd/topics/modules.rst -- cgit v1.2.1 From 24bf6147712655fc36a5d714a081853ea37e0312 Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Fri, 18 Nov 2022 14:31:27 -0500 Subject: net: skip duplicate mac check for netvsc nic and its VF (#1853) When accelerated network is enabled on Azure, the host presents two network interfaces with the same mac address to the VM: a synthetic nic (netvsc) and a VF nic, which is enslaved to the synthetic nic. The net module is already excluding slave nics when enumerating interfaces. However, if cloud-init starts enumerating after the kernel makes the VF visible to userspace, but before the enslaving has finished, cloud-init will see two nics with duplicate mac. 
This change will skip the duplicate mac error if one of the two nics with duplicate mac is a netvsc nic LP: #1844191 --- cloudinit/net/__init__.py | 38 ++++++++++++++++++++++++++++++++++---- tests/unittests/test_net.py | 25 +++++++++++++++++++++++++ 2 files changed, 59 insertions(+), 4 deletions(-) diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 4bc48676..0a41a2d4 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -1000,15 +1000,45 @@ def get_interfaces_by_mac_on_linux(blacklist_drivers=None) -> dict: Bridges and any devices that have a 'stolen' mac are excluded.""" ret: dict = {} - for name, mac, _driver, _devid in get_interfaces( + driver_map: dict = {} + for name, mac, driver, _devid in get_interfaces( blacklist_drivers=blacklist_drivers ): if mac in ret: - raise RuntimeError( - "duplicate mac found! both '%s' and '%s' have mac '%s'" - % (name, ret[mac], mac) + raise_duplicate_mac_error = True + msg = "duplicate mac found! both '%s' and '%s' have mac '%s'." % ( + name, + ret[mac], + mac, ) + # Hyper-V netvsc driver will register a VF with the same mac + # + # The VF will be enslaved to the master nic shortly after + # registration. If cloud-init starts enumerating the interfaces + # before the completion of the enslaving process, it will see + # two different nics with duplicate mac. Cloud-init should ignore + # the slave nic (which does not have hv_netvsc driver). + if driver != driver_map[mac]: + if driver_map[mac] == "hv_netvsc": + LOG.warning( + msg + " Ignoring '%s' due to driver '%s' and " + "'%s' having driver hv_netvsc." + % (name, driver, ret[mac]) + ) + continue + if driver == "hv_netvsc": + raise_duplicate_mac_error = False + LOG.warning( + msg + " Ignoring '%s' due to driver '%s' and " + "'%s' having driver hv_netvsc." 
+ % (ret[mac], driver_map[mac], name) + ) + + if raise_duplicate_mac_error: + raise RuntimeError(msg) + ret[mac] = name + driver_map[mac] = driver # Pretend that an Infiniband GUID is an ethernet address for Openstack # configuration purposes diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 90447315..bf6e375d 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -7981,6 +7981,10 @@ class TestGetInterfacesByMac(CiTestCase): "bridge1", "bond1.101", "lo", + "netvsc0-vf", + "netvsc0", + "netvsc1", + "netvsc1-vf", ], "macs": { "enp0s1": "aa:aa:aa:aa:aa:01", @@ -7991,14 +7995,27 @@ class TestGetInterfacesByMac(CiTestCase): "bridge1-nic": "aa:aa:aa:aa:aa:03", "lo": "00:00:00:00:00:00", "greptap0": "00:00:00:00:00:00", + "netvsc0-vf": "aa:aa:aa:aa:aa:04", + "netvsc0": "aa:aa:aa:aa:aa:04", + "netvsc1-vf": "aa:aa:aa:aa:aa:05", + "netvsc1": "aa:aa:aa:aa:aa:05", "tun0": None, }, + "drivers": { + "netvsc0": "hv_netvsc", + "netvsc0-vf": "foo", + "netvsc1": "hv_netvsc", + "netvsc1-vf": "bar", + }, } data: dict = {} def _se_get_devicelist(self): return list(self.data["devices"]) + def _se_device_driver(self, name): + return self.data["drivers"].get(name, None) + def _se_get_interface_mac(self, name): return self.data["macs"][name] @@ -8020,6 +8037,7 @@ class TestGetInterfacesByMac(CiTestCase): self.data["devices"] = set(list(self.data["macs"].keys())) mocks = ( "get_devicelist", + "device_driver", "get_interface_mac", "is_bridge", "interface_has_own_mac", @@ -8039,6 +8057,11 @@ class TestGetInterfacesByMac(CiTestCase): self.data["macs"]["bridge1-nic"] = self.data["macs"]["enp0s1"] self.assertRaises(RuntimeError, net.get_interfaces_by_mac) + def test_raise_exception_on_duplicate_netvsc_macs(self): + self._mock_setup() + self.data["macs"]["netvsc0"] = self.data["macs"]["netvsc1"] + self.assertRaises(RuntimeError, net.get_interfaces_by_mac) + def test_excludes_any_without_mac_address(self): self._mock_setup() ret = 
net.get_interfaces_by_mac() @@ -8057,6 +8080,8 @@ class TestGetInterfacesByMac(CiTestCase): "aa:aa:aa:aa:aa:02": "enp0s2", "aa:aa:aa:aa:aa:03": "bridge1-nic", "00:00:00:00:00:00": "lo", + "aa:aa:aa:aa:aa:04": "netvsc0", + "aa:aa:aa:aa:aa:05": "netvsc1", }, ret, ) -- cgit v1.2.1 From 74ed1387afb60b79bf0d24afe5967c510ea51074 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 18 Nov 2022 15:39:53 -0700 Subject: tests: ds-id mocks for vmware-rpctool as utility may not exist in env Some build environments will not have open-vm-tools and consequently vmware-rpctool installed. Since we are mocking behavior of tests we fake the presence of this utility to ensure VMware is properly detected. --- tests/unittests/test_ds_identify.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 8e3c28ac..cc75209e 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -1608,6 +1608,11 @@ VALID_CFG = { "ds": "VMware", "mocks": [ MOCK_VIRT_IS_VMWARE, + { + "name": "vmware_has_rpctool", + "ret": 0, + "out": "/usr/bin/vmware-rpctool", + }, ], "files": { # Setup vmware customization enabled -- cgit v1.2.1 From 3be6553404fd3fa37a5ed17df836a301913fe3ca Mon Sep 17 00:00:00 2001 From: Nigel Kukard Date: Mon, 21 Nov 2022 10:47:53 +0000 Subject: Add "nkukard" as contributor (#1864) --- tools/.github-cla-signers | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 5cb319d6..8adca985 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -84,6 +84,7 @@ nazunalika netcho nicolasbock nishigori +nkukard olivierlemasle omBratteng onitake -- cgit v1.2.1 From a5632e07fab8a02b8b9ab037dd00e300bf9c33ac Mon Sep 17 00:00:00 2001 From: sxt1001 Date: Mon, 21 Nov 2022 19:36:27 +0800 Subject: test_cloud_sigma: delete useless test (#1828) --- tests/unittests/sources/helpers/test_cloudsigma.py | 67 ---------------------- 
1 file changed, 67 deletions(-) delete mode 100644 tests/unittests/sources/helpers/test_cloudsigma.py diff --git a/tests/unittests/sources/helpers/test_cloudsigma.py b/tests/unittests/sources/helpers/test_cloudsigma.py deleted file mode 100644 index 3c687388..00000000 --- a/tests/unittests/sources/helpers/test_cloudsigma.py +++ /dev/null @@ -1,67 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.sources.helpers.cloudsigma import Cepko -from tests.unittests import helpers as test_helpers - -SERVER_CONTEXT = { - "cpu": 1000, - "cpus_instead_of_cores": False, - "global_context": {"some_global_key": "some_global_val"}, - "mem": 1073741824, - "meta": {"ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe"}, - "name": "test_server", - "requirements": [], - "smp": 1, - "tags": ["much server", "very performance"], - "uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e889", - "vnc_password": "9e84d6cb49e46379", -} - - -class CepkoMock(Cepko): - def all(self): - return SERVER_CONTEXT - - def get(self, key="", request_pattern=None): - return SERVER_CONTEXT["tags"] - - -# 2015-01-22 BAW: This test is completely useless because it only ever tests -# the CepkoMock object. Even in its original form, I don't think it ever -# touched the underlying Cepko class methods. 
-class CepkoResultTests(test_helpers.TestCase): - def setUp(self): - self.c = Cepko() - raise test_helpers.SkipTest("This test is completely useless") - - def test_getitem(self): - result = self.c.all() - self.assertEqual("65b2fb23-8c03-4187-a3ba-8b7c919e889", result["uuid"]) - self.assertEqual([], result["requirements"]) - self.assertEqual("much server", result["tags"][0]) - self.assertEqual(1, result["smp"]) - - def test_len(self): - self.assertEqual(len(SERVER_CONTEXT), len(self.c.all())) - - def test_contains(self): - result = self.c.all() - self.assertTrue("uuid" in result) - self.assertFalse("uid" in result) - self.assertTrue("meta" in result) - self.assertFalse("ssh_public_key" in result) - - def test_iter(self): - self.assertEqual( - sorted(SERVER_CONTEXT.keys()), - sorted([key for key in self.c.all()]), - ) - - def test_with_list_as_result(self): - result = self.c.get("tags") - self.assertEqual("much server", result[0]) - self.assertTrue("very performance" in result) - self.assertEqual(2, len(result)) - - -# vi: ts=4 expandtab -- cgit v1.2.1 From 5c1bd34e36dbce6b378e8c59d8bd105e285ddb33 Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Mon, 21 Nov 2022 14:48:49 +0100 Subject: travis: promote 3.11-dev to 3.11 (#1866) --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 6456204b..d3421e17 100644 --- a/.travis.yml +++ b/.travis.yml @@ -147,7 +147,7 @@ matrix: # Test all supported Python versions (but at the end, so we schedule # longer-running jobs first) - python: 3.12-dev - - python: 3.11-dev + - python: 3.11 - python: "3.10" - python: 3.9 - python: 3.8 -- cgit v1.2.1 From 47174014c09e454b452c234a118756e981f95a01 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 21 Nov 2022 14:13:14 -0600 Subject: Update read-version Use 'git describe ' as the version number, even if the version differs from what is in version.py. 
This means that if the most recent commit is a tag, we'll get the tag number, otherwise we'll also show the number of commits since the last tag on the branch. Fix setup.py to align with PEP 440 versioning replacing trailing hyphen beyond major.minor.patch-g with a "+". Additionally, did some cleanup and typing fixes on the script. --- setup.py | 8 +++++++- tools/read-version | 54 +++++++++++++++++++++--------------------------------- 2 files changed, 28 insertions(+), 34 deletions(-) diff --git a/setup.py b/setup.py index 470dd774..04aae5b2 100644 --- a/setup.py +++ b/setup.py @@ -73,7 +73,13 @@ def in_virtualenv(): def get_version(): cmd = [sys.executable, "tools/read-version"] ver = subprocess.check_output(cmd) - return ver.decode("utf-8").strip() + version = ver.decode("utf-8").strip() + # read-version can spit out something like 22.4-15-g7f97aee24 + # which is invalid under PEP440. If we replace the first - with a + + # that should give us a valid version. + if "-" in version: + version = version.replace("-", "+", 1) + return version def read_requires(): diff --git a/tools/read-version b/tools/read-version index 9eaecb33..50f91b5d 100755 --- a/tools/read-version +++ b/tools/read-version @@ -5,10 +5,10 @@ import json import subprocess import sys -if "avoid-pep8-E402-import-not-top-of-file": - _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) - sys.path.insert(0, _tdir) - from cloudinit import version as ci_version +_tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +sys.path.insert(0, _tdir) + +from cloudinit import version as ci_version # noqa: E402 def tiny_p(cmd): @@ -55,13 +55,17 @@ use_tags = "--tags" in sys.argv or os.environ.get("CI_RV_TAGS") output_json = "--json" in sys.argv src_version = ci_version.version_string() -version_long = None +# upstream/MM.NN.x tracks our patch level releases so ignore trailing '.x' +major_minor_version = ".".join(src_version.split(".")[:2]) +version_long = "" # If we're performing 
CI for a new release branch (which our tooling creates # with an "upstream/" prefix), then we don't want to enforce strict version # matching because we know it will fail. github_ci_release_br = bool( - os.environ.get("GITHUB_HEAD_REF", "").startswith(f"upstream/{src_version}") + os.environ.get("GITHUB_HEAD_REF", "").startswith( + f"upstream/{major_minor_version}" + ) ) travis_ci_release_br = bool( os.environ.get("TRAVIS_PULL_REQUEST_BRANCH", "").startswith("upstream/") @@ -72,39 +76,25 @@ if is_gitdir(_tdir) and which("git") and not is_release_branch_ci: # This cmd can be simplified to ["git", "branch", "--show-current"] # after bionic EOL. branch_name = tiny_p(["git", "rev-parse", "--abbrev-ref", "HEAD"]).strip() - if branch_name.startswith(f"upstream/{src_version}"): + if branch_name.startswith(f"upstream/{major_minor_version}"): version = src_version - version_long = None + version_long = "" else: flags = [] if use_tags: flags = ["--tags"] - cmd = ["git", "describe", "--abbrev=8", "--match=[0-9]*"] + flags - - try: - version = tiny_p(cmd).strip() - except RuntimeError: - version = None - - if version is None or not version.startswith(src_version): - sys.stderr.write( - f"git describe version ({version}) differs from " - f"cloudinit.version ({src_version})\n" - ) - sys.stderr.write( - "Please get the latest upstream tags.\n" - "As an example, this can be done with the following:\n" - "$ git remote add upstream https://git.launchpad.net/" - "cloud-init\n" - "$ git fetch upstream --tags\n" - ) - sys.exit(1) + cmd = [ + "git", + "describe", + branch_name, + ] + flags + version = tiny_p(cmd).strip() version_long = tiny_p(cmd + ["--long"]).strip() else: version = src_version - version_long = None + version_long = "" # version is X.Y.Z[+xxx.gHASH] # version_long is None or X.Y.Z-xxx-gHASH @@ -115,7 +105,7 @@ distance = None if version_long: info = version_long.partition("-")[2] - extra = "-" + info + extra = f"-{info}" distance, commit = info.split("-") # remove the 
'g' from gHASH commit = commit[1:] @@ -133,8 +123,6 @@ data = { if output_json: sys.stdout.write(json.dumps(data, indent=1) + "\n") else: - sys.stdout.write(release + "\n") + sys.stdout.write(version + "\n") sys.exit(0) - -# vi: ts=4 expandtab -- cgit v1.2.1 From c038808e86306e48eec65b1ab361b7e346f73d35 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 21 Nov 2022 14:36:39 -0600 Subject: Release 22.4.1 Bump the version in cloudinit/version.py to 22.4.1 and update ChangeLog. --- ChangeLog | 6 ++++++ cloudinit/version.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index eb9a104b..2e149189 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +22.4.1 + - net: skip duplicate mac check for netvsc nic and its VF (#1853) + [Anh Vo] (LP: #1844191) + - ChangeLog: whitespace cleanup (#1850) + - changelog: capture 22.3.1-4 releases + 22.4 - test: fix pro integration test [Alberto Contreras] - cc_disk_setup: pass options in correct order to utils (#1829) diff --git a/cloudinit/version.py b/cloudinit/version.py index b9b42af3..4b739354 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-__VERSION__ = "22.4" +__VERSION__ = "22.4.1" _PACKAGED_VERSION = "@@PACKAGED_VERSION@@" FEATURES = [ -- cgit v1.2.1 From f1a9e44ecdf74c6f407afedd74454565a1e20948 Mon Sep 17 00:00:00 2001 From: s-makin Date: Tue, 22 Nov 2022 20:34:28 +0000 Subject: Docs: adding relative links - index.rst: changed contributing link to :ref: - found CONTRIBUTING.rst had rtd.io links still in it - added labels to several topic pages to fix that issue - changed all of those to :ref: links --- CONTRIBUTING.rst | 14 ++++++-------- doc-requirements.txt | 2 +- doc/rtd/index.rst | 5 ++--- doc/rtd/topics/code_review.rst | 3 ++- doc/rtd/topics/module_creation.rst | 11 ++--------- doc/rtd/topics/modules.rst | 3 +++ doc/rtd/topics/network-config-format-v2.rst | 2 -- doc/rtd/topics/tutorial.rst | 7 +++---- 8 files changed, 19 insertions(+), 28 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 62628fd5..e9e25499 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -1,3 +1,5 @@ +.. _contributing: + Contributing to cloud-init ************************** @@ -17,15 +19,12 @@ Before any pull request can be accepted, you must do the following: * Add your Github username (alphabetically) to the in-repository list that we use to track CLA signatures: `tools/.github-cla-signers`_ -* Add or update any `unit tests`_ accordingly -* Add or update any `integration tests`_ (if applicable) +* Add or update any :ref:`unit tests` accordingly +* Add or update any :ref:`integration_tests` (if applicable) * Format code (using black and isort) with `tox -e do_format` * Ensure unit tests and linting pass using `tox`_ * Submit a PR against the `main` branch of the `cloud-init` repository -.. _unit tests: https://cloudinit.readthedocs.io/en/latest/topics/testing.html -.. 
_integration tests: https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html - The detailed instructions ------------------------- @@ -77,7 +76,7 @@ Follow these steps to submit your first pull request to cloud-init: git remote add upstream git@github.com:canonical/cloud-init.git git push origin main -* Read through the cloud-init `Code Review Process`_, so you understand +* Read through the cloud-init :ref:`Code Review Process`, so you understand how your changes will end up in cloud-init's codebase. * Submit your first cloud-init pull request, adding your Github username to the @@ -182,14 +181,13 @@ Do these things for each feature or bug - Click 'Create Pull Request` Then, a cloud-init committer will review your changes and -follow up in the pull request. Look at the `Code Review Process`_ doc +follow up in the pull request. Look at the :ref:`Code Review Process` doc to understand the following steps. Feel free to ping and/or join ``#cloud-init`` on Libera irc if you have any questions. .. _tox: https://tox.readthedocs.io/en/latest/ -.. _Code Review Process: https://cloudinit.readthedocs.io/en/latest/topics/code_review.html Design ====== diff --git a/doc-requirements.txt b/doc-requirements.txt index 6f48062e..359da21d 100644 --- a/doc-requirements.txt +++ b/doc-requirements.txt @@ -3,4 +3,4 @@ furo m2r2 pyyaml sphinx -sphinx-design +sphinx_design diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst index 94d7c882..fd062e23 100644 --- a/doc/rtd/index.rst +++ b/doc/rtd/index.rst @@ -1,6 +1,6 @@ .. _index: -cloud-init Documentation +cloud-init documentation ######################## Cloud-init is the *industry standard* multi-distribution method for @@ -28,7 +28,7 @@ projects, contributions, suggestions, fixes and constructive feedback. 
* Read our `Code of conduct`_ * Ask questions in the ``#cloud-init`` `IRC channel on Libera`_ * Join the `cloud-init mailing list`_ -* `Contribute on Github`_ +* :ref:`Contribute on Github` * `Release schedule`_ Having trouble? We would like to help! @@ -107,6 +107,5 @@ Having trouble? We would like to help! .. _IRC channel on Libera: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init .. _cloud-init mailing list: https://launchpad.net/~cloud-init .. _mailing list archive: https://lists.launchpad.net/cloud-init/ -.. _Contribute on Github: https://cloudinit.readthedocs.io/en/latest/topics/contributing.html .. _Release schedule: https://discourse.ubuntu.com/t/cloud-init-release-schedule/32244 .. _Report bugs on Launchpad: https://bugs.launchpad.net/cloud-init/+filebug diff --git a/doc/rtd/topics/code_review.rst b/doc/rtd/topics/code_review.rst index 20c81eac..2be78ed5 100644 --- a/doc/rtd/topics/code_review.rst +++ b/doc/rtd/topics/code_review.rst @@ -1,4 +1,5 @@ -******************* +.. _code_review_process: + Code Review Process ******************* diff --git a/doc/rtd/topics/module_creation.rst b/doc/rtd/topics/module_creation.rst index 69a6a8ae..56cadec4 100644 --- a/doc/rtd/topics/module_creation.rst +++ b/doc/rtd/topics/module_creation.rst @@ -70,14 +70,8 @@ Guidelines module definition in ``/etc/cloud/cloud.cfg[.d]`` has been modified to pass arguments to this module. -* Your module name must be included in `schema-cloud-config.json`_ - under ``$defs.all_modules``, in order to be accepted as a module key in - the :ref:`base config`. - -* Add the new module to `Module Reference`_. - * If your module introduces any new cloud-config keys, you must provide a - schema definition in `schema-cloud-config.json`_. + schema definition in `cloud-init-schema.json`_. * The ``meta`` variable must exist and be of type `MetaSchema`_. * ``id``: The module id. 
In most cases this will be the filename without @@ -127,9 +121,8 @@ in the ``cloud_final_modules`` section before the ``final-message`` module. .. _MetaSchema: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/config/schema.py#L58 .. _OSFAMILIES: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/distros/__init__.py#L35 .. _settings.py: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/settings.py#L66 -.. _schema-cloud-config.json: https://github.com/canonical/cloud-init/blob/main/cloudinit/config/schemas/schema-cloud-config-v1.json +.. _cloud-init-schema.json: https://github.com/canonical/cloud-init/blob/main/cloudinit/config/schemas/versions.schema.cloud-config.json .. _cloud.cfg.tmpl: https://github.com/canonical/cloud-init/blob/main/config/cloud.cfg.tmpl .. _cloud_init_modules: https://github.com/canonical/cloud-init/blob/b4746b6aed7660510071395e70b2d6233fbdc3ab/config/cloud.cfg.tmpl#L70 .. _cloud_config_modules: https://github.com/canonical/cloud-init/blob/b4746b6aed7660510071395e70b2d6233fbdc3ab/config/cloud.cfg.tmpl#L101 .. _cloud_final_modules: https://github.com/canonical/cloud-init/blob/b4746b6aed7660510071395e70b2d6233fbdc3ab/config/cloud.cfg.tmpl#L144 -.. _Module Reference: https://github.com/canonical/cloud-init/blob/main/doc/rtd/topics/modules.rst diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst index b0ad83e4..0274d7bc 100644 --- a/doc/rtd/topics/modules.rst +++ b/doc/rtd/topics/modules.rst @@ -37,6 +37,9 @@ Module Reference .. automodule:: cloudinit.config.cc_rh_subscription .. automodule:: cloudinit.config.cc_rightscale_userdata .. automodule:: cloudinit.config.cc_rsyslog + +.. _mod-runcmd: + .. automodule:: cloudinit.config.cc_runcmd .. automodule:: cloudinit.config.cc_salt_minion .. 
automodule:: cloudinit.config.cc_scripts_per_boot diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst index b3e1df27..53274417 100644 --- a/doc/rtd/topics/network-config-format-v2.rst +++ b/doc/rtd/topics/network-config-format-v2.rst @@ -233,7 +233,6 @@ Example: ``addresses: [192.168.14.2/24, 2001:1::1/64]`` **gateway4**: or **gateway6**: *<(scalar)>* -Deprecated, see `netplan#default-routes`_. Set default gateway for IPv4/6, for manual address configuration. This requires setting ``addresses`` too. Gateway IPs must be in a form recognized by ``inet_pton(3)`` @@ -573,6 +572,5 @@ This is a complex example which shows most available features: :: dhcp4: yes .. _netplan: https://netplan.io -.. _netplan#default-routes: https://netplan.io/reference#default-routes .. _netplan#dhcp-overrides: https://netplan.io/reference#dhcp-overrides .. vi: textwidth=79 diff --git a/doc/rtd/topics/tutorial.rst b/doc/rtd/topics/tutorial.rst index e8bed272..aadbe45b 100644 --- a/doc/rtd/topics/tutorial.rst +++ b/doc/rtd/topics/tutorial.rst @@ -48,7 +48,7 @@ following file on your local filesystem at ``/tmp/my-user-data``: Here we are defining our cloud-init user data in the :ref:`cloud-config` format, using the -`runcmd`_ module to define a command to run. When applied, it +:ref:`runcmd module ` to define a command to run. When applied, it should write ``Hello, World!`` to ``/var/tmp/hello-world.txt``. Launch a container with our user data @@ -128,8 +128,8 @@ and we can remove the container using: What's next? ============ -In this tutorial, we used the runcmd_ module to execute a shell command. -The full list of modules available can be found in +In this tutorial, we used the :ref:`runcmd module ` to execute a +shell command. The full list of modules available can be found in :ref:`modules documentation`. Each module contains examples of how to use it. @@ -138,4 +138,3 @@ examples of more common use cases. .. 
_LXD: https://linuxcontainers.org/lxd/ .. _other installation options: https://linuxcontainers.org/lxd/getting-started-cli/#other-installation-options -.. _runcmd: https://cloudinit.readthedocs.io/en/latest/topics/modules.html#runcmd -- cgit v1.2.1 From 65eb520debc1ce804be05d1191389fe987726aaf Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 22 Nov 2022 18:03:08 -0600 Subject: Make 3.12 failures not fail the build (#1873) Since 3.12 is the development release, failures of it shouldn't fail the overall build. --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index d3421e17..45cfbf7e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -152,3 +152,5 @@ matrix: - python: 3.9 - python: 3.8 - python: 3.7 + allow_failures: + - python: 3.12-dev -- cgit v1.2.1 From 4ba809b5fe2dca02999459c09b7db3ebb1731f59 Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Wed, 23 Nov 2022 21:54:33 +0100 Subject: status: handle ds not defined in status.json (#1876) Handles any situation where `status.json` does not yet contain datasource information, by gracefully fulfilling a `datasource=None` in `StatusDetails`. 
LP: #1997559 --- cloudinit/cmd/status.py | 20 +++++++++++++------- tests/unittests/cmd/test_cloud_id.py | 11 +++++++++++ tests/unittests/cmd/test_status.py | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 58 insertions(+), 7 deletions(-) diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py index df136288..e1c37a78 100644 --- a/cloudinit/cmd/status.py +++ b/cloudinit/cmd/status.py @@ -13,11 +13,12 @@ import json import os import sys from time import gmtime, sleep, strftime -from typing import Any, Dict, List, NamedTuple, Tuple, Union +from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union from cloudinit import safeyaml from cloudinit.cmd.devel import read_cfg_paths from cloudinit.distros import uses_systemd +from cloudinit.helpers import Paths from cloudinit.util import get_cmdline, load_file, load_json CLOUDINIT_DISABLED_FILE = "/etc/cloud/cloud-init.disabled" @@ -63,7 +64,7 @@ class StatusDetails(NamedTuple): description: str errors: List[str] last_update: str - datasource: str + datasource: Optional[str] TABULAR_LONG_TMPL = """\ @@ -124,7 +125,7 @@ def handle_status_args(name, args) -> int: sys.stdout.flush() details = get_status_details(paths) sleep(0.25) - details_dict: Dict[str, Union[str, List[str], Dict[str, Any]]] = { + details_dict: Dict[str, Union[None, str, List[str], Dict[str, Any]]] = { "datasource": details.datasource, "boot_status_code": details.boot_status_code.value, "status": details.status.value, @@ -195,7 +196,7 @@ def get_bootstatus(disable_file, paths) -> Tuple[UXAppBootStatusCode, str]: return (bootstatus_code, reason) -def get_status_details(paths=None) -> StatusDetails: +def get_status_details(paths: Optional[Paths] = None) -> StatusDetails: """Return a dict with status, details and errors. @param paths: An initialized cloudinit.helpers.paths object. 
@@ -206,7 +207,7 @@ def get_status_details(paths=None) -> StatusDetails: status = UXAppStatus.NOT_RUN errors = [] - datasource = "" + datasource: Optional[str] = "" status_v1 = {} status_file = os.path.join(paths.run_dir, "status.json") @@ -228,9 +229,14 @@ def get_status_details(paths=None) -> StatusDetails: status = UXAppStatus.RUNNING description = "Running in stage: {0}".format(value) elif key == "datasource": + if value is None: + # If ds not yet written in status.json, then keep previous + # description + datasource = value + continue description = value - datasource, _, _ = value.partition(" ") - datasource = datasource.lower().replace("datasource", "") + ds, _, _ = value.partition(" ") + datasource = ds.lower().replace("datasource", "") elif isinstance(value, dict): errors.extend(value.get("errors", [])) start = value.get("start") or 0 diff --git a/tests/unittests/cmd/test_cloud_id.py b/tests/unittests/cmd/test_cloud_id.py index 80600555..bf87269a 100644 --- a/tests/unittests/cmd/test_cloud_id.py +++ b/tests/unittests/cmd/test_cloud_id.py @@ -45,6 +45,16 @@ STATUS_DETAILS_RUNNING = status.StatusDetails( ) +STATUS_DETAILS_RUNNING_DS_NONE = status.StatusDetails( + status.UXAppStatus.RUNNING, + status.UXAppBootStatusCode.UNKNOWN, + "", + [], + "", + None, +) + + @pytest.fixture(autouse=True) def setup_mocks(mocker): mocker.patch( @@ -203,6 +213,7 @@ class TestCloudId: (STATUS_DETAILS_DISABLED, 2), (STATUS_DETAILS_NOT_RUN, 3), (STATUS_DETAILS_RUNNING, 0), + (STATUS_DETAILS_RUNNING_DS_NONE, 0), ), ) @mock.patch(M_PATH + "get_status_details") diff --git a/tests/unittests/cmd/test_status.py b/tests/unittests/cmd/test_status.py index 6ae3b398..52a02c35 100644 --- a/tests/unittests/cmd/test_status.py +++ b/tests/unittests/cmd/test_status.py @@ -38,6 +38,40 @@ def config(tmpdir): class TestStatus: maxDiff = None + @mock.patch( + M_PATH + "load_file", + return_value=( + '{"v1": {"datasource": null, "init": {"errors": [], "finished": ' + 'null, "start": null}, 
"init-local": {"errors": [], "finished": ' + 'null, "start": 1669231096.9621563}, "modules-config": ' + '{"errors": [], "finished": null, "start": null},' + '"modules-final": {"errors": [], "finished": null, ' + '"start": null}, "modules-init": {"errors": [], "finished": ' + 'null, "start": null}, "stage": "init-local"} }' + ), + ) + @mock.patch(M_PATH + "os.path.exists", return_value=True) + @mock.patch( + M_PATH + "get_bootstatus", + return_value=( + status.UXAppBootStatusCode.ENABLED_BY_GENERATOR, + "Cloud-init enabled by systemd cloud-init-generator", + ), + ) + def test_get_status_details_ds_none( + self, m_get_boot_status, m_p_exists, m_load_json, tmpdir + ): + paths = mock.Mock() + paths.run_dir = str(tmpdir) + assert status.StatusDetails( + status.UXAppStatus.RUNNING, + status.UXAppBootStatusCode.ENABLED_BY_GENERATOR, + "Running in stage: init-local", + [], + "Wed, 23 Nov 2022 19:18:16 +0000", + None, # datasource + ) == status.get_status_details(paths) + @pytest.mark.parametrize( [ "ensured_file", -- cgit v1.2.1 From 893df0d61179a6722c44267f57c57284543c2f12 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 24 Nov 2022 00:29:37 -0700 Subject: Release 22.4.2 (#1878) Bump the version in cloudinit/version.py to 22.4.2 and update ChangeLog. --- ChangeLog | 3 +++ cloudinit/version.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 2e149189..6e6c0fb4 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,6 @@ +22.4.2 + - status: handle ds not defined in status.json (#1876) (LP: #1997559) + 22.4.1 - net: skip duplicate mac check for netvsc nic and its VF (#1853) [Anh Vo] (LP: #1844191) diff --git a/cloudinit/version.py b/cloudinit/version.py index 4b739354..f2d62c04 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-__VERSION__ = "22.4.1" +__VERSION__ = "22.4.2" _PACKAGED_VERSION = "@@PACKAGED_VERSION@@" FEATURES = [ -- cgit v1.2.1 From 871edd50fb9da441e4ef2fbebc435dfb8baa4127 Mon Sep 17 00:00:00 2001 From: einsibjarni Date: Sat, 26 Nov 2022 00:26:37 +0000 Subject: Add support for static IPv6 addresses for FreeBSD (#1839) Currently, FreeBSD ignores IPv6 addresses. This PR adds support for static IPv6 addresses --- cloudinit/net/bsd.py | 66 +++++++++++++++++++++++++------ cloudinit/net/freebsd.py | 23 ++++++++++- tests/unittests/distros/test_netconfig.py | 25 +++++++++++- tests/unittests/test_net_freebsd.py | 12 +++++- tools/.github-cla-signers | 1 + 5 files changed, 109 insertions(+), 18 deletions(-) diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py index e8778d27..b23279e5 100644 --- a/cloudinit/net/bsd.py +++ b/cloudinit/net/bsd.py @@ -30,6 +30,7 @@ class BSDRenderer(renderer.Renderer): config = {} self.target = None self.interface_configurations = {} + self.interface_configurations_ipv6 = {} self._postcmds = config.get("postcmds", True) def _ifconfig_entries(self, settings): @@ -62,8 +63,6 @@ class BSDRenderer(renderer.Renderer): LOG.info("Configuring interface %s", device_name) - self.interface_configurations[device_name] = "DHCP" - for subnet in interface.get("subnets", []): if subnet.get("type") == "static": if not subnet.get("netmask"): @@ -85,29 +84,70 @@ class BSDRenderer(renderer.Renderer): "mtu": subnet.get("mtu") or interface.get("mtu"), } + elif subnet.get("type") == "static6": + if not subnet.get("prefix"): + LOG.debug( + "Skipping IP %s, because there is no prefix", + subnet.get("address"), + ) + continue + LOG.debug( + "Configuring dev %s with %s / %s", + device_name, + subnet.get("address"), + subnet.get("prefix"), + ) + + self.interface_configurations_ipv6[device_name] = { + "address": subnet.get("address"), + "prefix": subnet.get("prefix"), + "mtu": subnet.get("mtu") or interface.get("mtu"), + } + elif ( + subnet.get("type") == "dhcp" + or 
subnet.get("type") == "dhcp4" + ): + self.interface_configurations[device_name] = "DHCP" + def _route_entries(self, settings): routes = list(settings.iter_routes()) for interface in settings.iter_interfaces(): subnets = interface.get("subnets", []) for subnet in subnets: - if subnet.get("type") != "static": + if subnet.get("type") == "static": + gateway = subnet.get("gateway") + if gateway and len(gateway.split(".")) == 4: + routes.append( + { + "network": "0.0.0.0", + "netmask": "0.0.0.0", + "gateway": gateway, + } + ) + elif subnet.get("type") == "static6": + gateway = subnet.get("gateway") + if gateway and len(gateway.split(":")) > 1: + routes.append( + { + "network": "::", + "prefix": "0", + "gateway": gateway, + } + ) + else: continue - gateway = subnet.get("gateway") - if gateway and len(gateway.split(".")) == 4: - routes.append( - { - "network": "0.0.0.0", - "netmask": "0.0.0.0", - "gateway": gateway, - } - ) routes += subnet.get("routes", []) + for route in routes: network = route.get("network") if not network: LOG.debug("Skipping a bad route entry") continue - netmask = route.get("netmask") + netmask = ( + route.get("netmask") + if route.get("netmask") + else route.get("prefix") + ) gateway = route.get("gateway") self.set_route(network, netmask, gateway) diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py index ec42b60c..415f4a5a 100644 --- a/cloudinit/net/freebsd.py +++ b/cloudinit/net/freebsd.py @@ -17,14 +17,31 @@ class Renderer(cloudinit.net.bsd.BSDRenderer): def write_config(self): for device_name, v in self.interface_configurations.items(): - net_config = "DHCP" if isinstance(v, dict): - net_config = v.get("address") + " netmask " + v.get("netmask") + net_config = "inet %s netmask %s" % ( + v.get("address"), + v.get("netmask"), + ) mtu = v.get("mtu") if mtu: net_config += " mtu %d" % mtu + elif v == "DHCP": + net_config = "DHCP" self.set_rc_config_value("ifconfig_" + device_name, net_config) + for device_name, v in 
self.interface_configurations_ipv6.items(): + if isinstance(v, dict): + net_config = "inet6 %s/%d" % ( + v.get("address"), + v.get("prefix"), + ) + mtu = v.get("mtu") + if mtu: + net_config += " mtu %d" % mtu + self.set_rc_config_value( + "ifconfig_%s_ipv6" % device_name, net_config + ) + def start_services(self, run=False): if not run: LOG.debug("freebsd generate postcmd disabled") @@ -58,6 +75,8 @@ class Renderer(cloudinit.net.bsd.BSDRenderer): def set_route(self, network, netmask, gateway): if network == "0.0.0.0": self.set_rc_config_value("defaultrouter", gateway) + elif network == "::": + self.set_rc_config_value("ipv6_defaultrouter", gateway) else: route_name = "route_net%d" % self._route_cpt route_cmd = "-route %s/%s %s" % (network, netmask, gateway) diff --git a/tests/unittests/distros/test_netconfig.py b/tests/unittests/distros/test_netconfig.py index e2694b09..236cc09f 100644 --- a/tests/unittests/distros/test_netconfig.py +++ b/tests/unittests/distros/test_netconfig.py @@ -363,7 +363,7 @@ class TestNetCfgDistroFreeBSD(TestNetCfgDistroBase): } rc_conf_expected = """\ defaultrouter=192.168.1.254 -ifconfig_eth0='192.168.1.5 netmask 255.255.255.0' +ifconfig_eth0='inet 192.168.1.5 netmask 255.255.255.0' ifconfig_eth1=DHCP """ @@ -377,6 +377,27 @@ ifconfig_eth1=DHCP expected_cfgs=expected_cfgs.copy(), ) + @mock.patch("cloudinit.net.get_interfaces_by_mac") + def test_apply_network_config_freebsd_ipv6_standard(self, ifaces_mac): + ifaces_mac.return_value = { + "00:15:5d:4c:73:00": "eth0", + } + rc_conf_expected = """\ +ipv6_defaultrouter=2607:f0d0:1002:0011::1 +ifconfig_eth1=DHCP +ifconfig_eth0_ipv6='inet6 2607:f0d0:1002:0011::2/64' +""" + + expected_cfgs = { + "/etc/rc.conf": rc_conf_expected, + "/etc/resolv.conf": "", + } + self._apply_and_verify_freebsd( + self.distro.apply_network_config, + V1_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy(), + ) + @mock.patch("cloudinit.net.get_interfaces_by_mac") def test_apply_network_config_freebsd_ifrename(self, 
ifaces_mac): ifaces_mac.return_value = { @@ -385,7 +406,7 @@ ifconfig_eth1=DHCP rc_conf_expected = """\ ifconfig_vtnet0_name=eth0 defaultrouter=192.168.1.254 -ifconfig_eth0='192.168.1.5 netmask 255.255.255.0' +ifconfig_eth0='inet 192.168.1.5 netmask 255.255.255.0' ifconfig_eth1=DHCP """ diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py index 1288c259..4121e404 100644 --- a/tests/unittests/test_net_freebsd.py +++ b/tests/unittests/test_net_freebsd.py @@ -16,6 +16,14 @@ config: - address: 172.20.80.129/25 type: static type: physical +- id: eno2 + mac_address: 08:94:ef:51:ae:e1 + mtu: 1470 + name: eno2 + subnets: + - address: fd12:3456:789a:1::1/64 + type: static6 + type: physical version: 1 """ @@ -76,6 +84,8 @@ class TestFreeBSDRoundTrip(CiTestCase): "/etc/rc.conf": ( "# dummy rc.conf\n" "ifconfig_eno1=" - "'172.20.80.129 netmask 255.255.255.128 mtu 1470'\n" + "'inet 172.20.80.129 netmask 255.255.255.128 mtu 1470'\n" + "ifconfig_eno2_ipv6=" + "'inet6 fd12:3456:789a:1::1/64 mtu 1470'\n" ), } diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 8adca985..a6bd08cc 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -36,6 +36,7 @@ dhensby eandersson eb3095 edudobay +einsibjarni emmanuelthome eslerm esposem -- cgit v1.2.1 From 198303eafde97d116bf5a1a3f36a5353f3656972 Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Mon, 28 Nov 2022 21:08:58 +0100 Subject: test: fix lxd preseed managed network config (#1881) Remove managed key in network config on LXD preseed configs as it is not a valid key in LXD > v4. 
--- tests/integration_tests/modules/test_lxd.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration_tests/modules/test_lxd.py b/tests/integration_tests/modules/test_lxd.py index 55d82a54..44cf5ee5 100644 --- a/tests/integration_tests/modules/test_lxd.py +++ b/tests/integration_tests/modules/test_lxd.py @@ -46,7 +46,6 @@ lxd: ipv4.address: auto ipv6.address: auto description: "" - managed: false name: lxdbr0 type: "" storage_pools: -- cgit v1.2.1 From 6795bb8c2950d6873bb00a1b4c287e9409c8f46f Mon Sep 17 00:00:00 2001 From: s-makin Date: Tue, 29 Nov 2022 06:58:32 +0000 Subject: docs: uprate analyze to performance page As previously discussed, the Analyze page contains the content we want to present under the "Performance" topic and should be uprated. I have changed the title of the "Analyze" page to "Performance" so that it shows correctly in the LHS menu. The anchors are the same, and I have not changed the file name so as to avoid having to update any links. I took the opportunity to make some minor tidying edits to the text. Hopefully these will add clarity and not change the intended meaning. --- doc/rtd/topics/analyze.rst | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/doc/rtd/topics/analyze.rst b/doc/rtd/topics/analyze.rst index 61213e28..afc3a5ef 100644 --- a/doc/rtd/topics/analyze.rst +++ b/doc/rtd/topics/analyze.rst @@ -1,9 +1,9 @@ .. _analyze: -Analyze -******* +Performance +*********** -The analyze subcommand was added to cloud-init in order to help analyze +The ``analyze`` subcommand was added to cloud-init to help analyze cloud-init boot time performance. It is loosely based on systemd-analyze where there are four subcommands: @@ -27,8 +27,8 @@ The analyze command requires one of the four subcommands: Availability ============ -The analyze subcommand is generally available across all distributions with the -exception of Gentoo and FreeBSD. 
+The ``analyze`` subcommand is generally available across all distributions, +with the exception of Gentoo and FreeBSD. Subcommands =========== @@ -37,7 +37,7 @@ Blame ----- The ``blame`` action matches ``systemd-analyze blame`` where it prints, in -descending order, the units that took the longest to run. This output is +descending order, the units that took the longest to run. This output is highly useful for examining where cloud-init is spending its time during execution. @@ -110,9 +110,9 @@ Show The ``show`` action is similar to ``systemd-analyze critical-chain`` which prints a list of units, the time they started and how long they took. -Cloud-init has four stages and within each stage a number of modules may run -depending on configuration. ``cloudinit-analyze show`` will, for each boot, -print this information and a summary total time, per boot. +Cloud-init has four :ref:`boot stages`, and within each stage a number of modules may +run depending on configuration. ``cloudinit-analyze show`` will, for each boot, +print this information and a summary of the total time. The following is an abbreviated example of the show output: -- cgit v1.2.1 From 68b94712ae31de092af5e85a9bbc70c5652bd735 Mon Sep 17 00:00:00 2001 From: Manasseh Zhou <2705803+ManassehZhou@users.noreply.github.com> Date: Thu, 1 Dec 2022 05:47:40 +0800 Subject: feat: add support aliyun metadata security harden mode (#1865) Currently, Alibaba cloud provides a security hardening mode for its metadata server, which is alike IMDSv2, and we should support it. 
Detailed information be found here: https://www.alibabacloud.com/help/en/elastic-compute-service/latest/view-instance-metadata#concept-dwj-y1x-wgb --- cloudinit/sources/DataSourceAliYun.py | 5 ++ cloudinit/sources/DataSourceEc2.py | 90 ++++++++++++++++++++++++---------- tests/unittests/sources/test_aliyun.py | 10 ++++ tests/unittests/sources/test_ec2.py | 12 ++++- tools/.github-cla-signers | 1 + 5 files changed, 90 insertions(+), 28 deletions(-) diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 6804274e..58e8755f 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -18,6 +18,11 @@ class DataSourceAliYun(EC2.DataSourceEc2): min_metadata_version = "2016-01-01" extended_metadata_versions: List[str] = [] + # Aliyun metadata server security enhanced mode overwrite + @property + def imdsv2_token_put_header(self): + return "X-aliyun-ecs-metadata-token" + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): hostname = self.metadata.get("hostname") is_default = False diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 139ec7e4..44665b26 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -30,12 +30,6 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND]) STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") STRICT_ID_DEFAULT = "warn" -API_TOKEN_ROUTE = "latest/api/token" -AWS_TOKEN_TTL_SECONDS = "21600" -AWS_TOKEN_PUT_HEADER = "X-aws-ec2-metadata-token" -AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + "-ttl-seconds" -AWS_TOKEN_REDACT = [AWS_TOKEN_PUT_HEADER, AWS_TOKEN_REQ_HEADER] - class CloudNames: ALIYUN = "aliyun" @@ -57,6 +51,10 @@ def skip_404_tag_errors(exception): return exception.code == 404 and "meta-data/tags/" in exception.url +# Cloud platforms that support IMDSv2 style metadata server +IDMSV2_SUPPORTED_CLOUD_PLATFORMS = [CloudNames.AWS, CloudNames.ALIYUN] + + class 
DataSourceEc2(sources.DataSource): dsname = "Ec2" @@ -192,6 +190,27 @@ class DataSourceEc2(sources.DataSource): self._platform_type = DataSourceEc2.dsname.lower() return self._platform_type + # IMDSv2 related parameters from the ec2 metadata api document + @property + def api_token_route(self): + return "latest/api/token" + + @property + def imdsv2_token_ttl_seconds(self): + return "21600" + + @property + def imdsv2_token_put_header(self): + return "X-aws-ec2-metadata-token" + + @property + def imdsv2_token_req_header(self): + return self.imdsv2_token_put_header + "-ttl-seconds" + + @property + def imdsv2_token_redact(self): + return [self.imdsv2_token_put_header, self.imdsv2_token_req_header] + def get_metadata_api_version(self): """Get the best supported api version from the metadata service. @@ -208,7 +227,9 @@ class DataSourceEc2(sources.DataSource): url = url_tmpl.format(self.metadata_address, api_ver) try: resp = uhelp.readurl( - url=url, headers=headers, headers_redact=AWS_TOKEN_REDACT + url=url, + headers=headers, + headers_redact=self.imdsv2_token_redact, ) except uhelp.UrlError as e: LOG.debug("url %s raised exception %s", url, e) @@ -232,7 +253,7 @@ class DataSourceEc2(sources.DataSource): api_version, self.metadata_address, headers_cb=self._get_headers, - headers_redact=AWS_TOKEN_REDACT, + headers_redact=self.imdsv2_token_redact, exception_cb=self._refresh_stale_aws_token_cb, ).get("document", {}) return self.identity.get( @@ -248,12 +269,12 @@ class DataSourceEc2(sources.DataSource): the instance owner has disabled the IMDS HTTP endpoint or the network topology conflicts with the configured hop-limit. 
""" - if self.cloud_name != CloudNames.AWS: + if self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: return urls = [] url2base = {} - url_path = API_TOKEN_ROUTE + url_path = self.api_token_route request_method = "PUT" for url in mdurls: cur = "{0}/{1}".format(url, url_path) @@ -275,7 +296,7 @@ class DataSourceEc2(sources.DataSource): headers_cb=self._get_headers, exception_cb=self._imds_exception_cb, request_method=request_method, - headers_redact=AWS_TOKEN_REDACT, + headers_redact=self.imdsv2_token_redact, connect_synchronously=False, ) except uhelp.UrlError: @@ -320,7 +341,10 @@ class DataSourceEc2(sources.DataSource): # If we could not get an API token, then we assume the IMDS # endpoint was disabled and we move on without a data source. # Fallback to IMDSv1 if not running on EC2 - if not metadata_address and self.cloud_name != CloudNames.AWS: + if ( + not metadata_address + and self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS + ): # if we can't get a token, use instance-id path urls = [] url2base = {} @@ -339,7 +363,7 @@ class DataSourceEc2(sources.DataSource): max_wait=url_params.max_wait_seconds, timeout=url_params.timeout_seconds, status_cb=LOG.warning, - headers_redact=AWS_TOKEN_REDACT, + headers_redact=self.imdsv2_token_redact, headers_cb=self._get_headers, request_method=request_method, ) @@ -350,7 +374,7 @@ class DataSourceEc2(sources.DataSource): if metadata_address: self.metadata_address = metadata_address LOG.debug("Using metadata source: '%s'", self.metadata_address) - elif self.cloud_name == CloudNames.AWS: + elif self.cloud_name in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: LOG.warning("IMDS's HTTP endpoint is probably disabled") else: LOG.critical( @@ -531,9 +555,9 @@ class DataSourceEc2(sources.DataSource): if not self.wait_for_metadata_service(): return {} api_version = self.get_metadata_api_version() - redact = AWS_TOKEN_REDACT + redact = self.imdsv2_token_redact crawled_metadata = {} - if self.cloud_name == CloudNames.AWS: + if 
self.cloud_name in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: exc_cb = self._refresh_stale_aws_token_cb exc_cb_ud = self._skip_or_refresh_stale_aws_token_cb skip_cb = None @@ -577,22 +601,26 @@ class DataSourceEc2(sources.DataSource): crawled_metadata["_metadata_api_version"] = api_version return crawled_metadata - def _refresh_api_token(self, seconds=AWS_TOKEN_TTL_SECONDS): + def _refresh_api_token(self, seconds=None): """Request new metadata API token. @param seconds: The lifetime of the token in seconds @return: The API token or None if unavailable. """ - if self.cloud_name != CloudNames.AWS: + if self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: return None + + if seconds is None: + seconds = self.imdsv2_token_ttl_seconds + LOG.debug("Refreshing Ec2 metadata API token") - request_header = {AWS_TOKEN_REQ_HEADER: seconds} - token_url = "{}/{}".format(self.metadata_address, API_TOKEN_ROUTE) + request_header = {self.imdsv2_token_req_header: seconds} + token_url = "{}/{}".format(self.metadata_address, self.api_token_route) try: response = uhelp.readurl( token_url, headers=request_header, - headers_redact=AWS_TOKEN_REDACT, + headers_redact=self.imdsv2_token_redact, request_method="PUT", ) except uhelp.UrlError as e: @@ -653,20 +681,22 @@ class DataSourceEc2(sources.DataSource): If _api_token is unset on AWS, attempt to refresh the token via a PUT and then return the updated token header. """ - if self.cloud_name != CloudNames.AWS: + if self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: return {} - # Request a 6 hour token if URL is API_TOKEN_ROUTE - request_token_header = {AWS_TOKEN_REQ_HEADER: AWS_TOKEN_TTL_SECONDS} - if API_TOKEN_ROUTE in url: + # Request a 6 hour token if URL is api_token_route + request_token_header = { + self.imdsv2_token_req_header: self.imdsv2_token_ttl_seconds + } + if self.api_token_route in url: return request_token_header if not self._api_token: # If we don't yet have an API token, get one via a PUT against - # API_TOKEN_ROUTE. 
This _api_token may get unset by a 403 due + # api_token_route. This _api_token may get unset by a 403 due # to an invalid or expired token self._api_token = self._refresh_api_token() if not self._api_token: return {} - return {AWS_TOKEN_PUT_HEADER: self._api_token} + return {self.imdsv2_token_put_header: self._api_token} class DataSourceEc2Local(DataSourceEc2): @@ -746,6 +776,11 @@ def warn_if_necessary(cfgval, cfg): warnings.show_warning("non_ec2_md", cfg, mode=True, sleep=sleep) +def identify_aliyun(data): + if data["product_name"] == "Alibaba Cloud ECS": + return CloudNames.ALIYUN + + def identify_aws(data): # data is a dictionary returned by _collect_platform_data. if data["uuid"].startswith("ec2") and ( @@ -788,6 +823,7 @@ def identify_platform(): identify_zstack, identify_e24cloud, identify_outscale, + identify_aliyun, lambda x: CloudNames.UNKNOWN, ) for checker in checks: diff --git a/tests/unittests/sources/test_aliyun.py b/tests/unittests/sources/test_aliyun.py index fe4e54b5..6ceaf8f4 100644 --- a/tests/unittests/sources/test_aliyun.py +++ b/tests/unittests/sources/test_aliyun.py @@ -98,6 +98,15 @@ class TestAliYunDatasource(test_helpers.ResponsesTestCase): "instance-identity", ) + @property + def token_url(self): + return os.path.join( + self.metadata_address, + "latest", + "api", + "token", + ) + def register_mock_metaserver(self, base_url, data): def register_helper(register, base_url, body): if isinstance(body, str): @@ -127,6 +136,7 @@ class TestAliYunDatasource(test_helpers.ResponsesTestCase): self.register_mock_metaserver(self.metadata_url, self.default_metadata) self.register_mock_metaserver(self.userdata_url, self.default_userdata) self.register_mock_metaserver(self.identity_url, self.default_identity) + self.responses.add(responses.PUT, self.token_url, "API-TOKEN") def _test_get_data(self): self.assertEqual(self.ds.metadata, self.default_metadata) diff --git a/tests/unittests/sources/test_ec2.py b/tests/unittests/sources/test_ec2.py index 
4c832da7..4dd7c497 100644 --- a/tests/unittests/sources/test_ec2.py +++ b/tests/unittests/sources/test_ec2.py @@ -303,7 +303,7 @@ def register_mock_metaserver(base_url, data, responses_mock=None): def myreg(*argc, **kwargs): url, body = argc - method = responses.PUT if ec2.API_TOKEN_ROUTE in url else responses.GET + method = responses.PUT if "latest/api/token" in url else responses.GET status = kwargs.get("status", 200) return responses_mock.add(method, url, body, status=status) @@ -1179,6 +1179,16 @@ class TesIdentifyPlatform(test_helpers.CiTestCase): unspecial.update(**kwargs) return unspecial + @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") + def test_identify_aliyun(self, m_collect): + """aliyun should be identified if product name equals to + Alibaba Cloud ECS + """ + m_collect.return_value = self.collmock( + product_name="Alibaba Cloud ECS" + ) + self.assertEqual(ec2.CloudNames.ALIYUN, ec2.identify_platform()) + @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") def test_identify_zstack(self, m_collect): """zstack should be identified if chassis-asset-tag diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index a6bd08cc..a7c2b17c 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -72,6 +72,7 @@ lucendio lungj magnetikonline mal +ManassehZhou mamercad manuelisimo marlluslustosa -- cgit v1.2.1 From 7a93f1cad75715696227aa0893ff15156eaa7943 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mina=20Gali=C4=87?= Date: Wed, 30 Nov 2022 22:00:11 +0000 Subject: FreeBSD init: use cloudinit_enable as only rcvar (#1875) All components of cloudinit need to run, and in a specific order. If cloudinit is to be enabled, it should only rely on one variable. 
This change better encodes that, than #161 Sponsored by: FreeBSD Foundation --- sysvinit/freebsd/cloudconfig | 4 ++-- sysvinit/freebsd/cloudfinal | 4 ++-- sysvinit/freebsd/cloudinit | 2 +- sysvinit/freebsd/cloudinitlocal | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/sysvinit/freebsd/cloudconfig b/sysvinit/freebsd/cloudconfig index fb604f4d..13c47280 100755 --- a/sysvinit/freebsd/cloudconfig +++ b/sysvinit/freebsd/cloudconfig @@ -21,8 +21,8 @@ cloudconfig_start() ${command} modules --mode config } -load_rc_config $name +load_rc_config 'cloudinit' -: ${cloudconfig_enable="NO"} +: ${cloudinit_enable="NO"} run_rc_command "$1" diff --git a/sysvinit/freebsd/cloudfinal b/sysvinit/freebsd/cloudfinal index 72047653..76a584ec 100755 --- a/sysvinit/freebsd/cloudfinal +++ b/sysvinit/freebsd/cloudfinal @@ -21,8 +21,8 @@ cloudfinal_start() ${command} modules --mode final } -load_rc_config $name +load_rc_config 'cloudinit' -: ${cloudfinal_enable="NO"} +: ${cloudinit_enable="NO"} run_rc_command "$1" diff --git a/sysvinit/freebsd/cloudinit b/sysvinit/freebsd/cloudinit index d26f3d0f..679adf5d 100755 --- a/sysvinit/freebsd/cloudinit +++ b/sysvinit/freebsd/cloudinit @@ -21,7 +21,7 @@ cloudinit_start() ${command} init } -load_rc_config $name +load_rc_config 'cloudinit' : ${cloudinit_enable="NO"} diff --git a/sysvinit/freebsd/cloudinitlocal b/sysvinit/freebsd/cloudinitlocal index cb67b4a2..d6c3579e 100755 --- a/sysvinit/freebsd/cloudinitlocal +++ b/sysvinit/freebsd/cloudinitlocal @@ -21,8 +21,8 @@ cloudlocal_start() ${command} init --local } -load_rc_config $name +load_rc_config 'cloudinit' -: ${cloudinitlocal_enable="NO"} +: ${cloudinit_enable="NO"} run_rc_command "$1" -- cgit v1.2.1 From 5e6ecc615318b48e2b14c2fd1f78571522848b4e Mon Sep 17 00:00:00 2001 From: Louis Abel Date: Fri, 2 Dec 2022 12:07:04 -0700 Subject: Append derivatives to is_rhel list in cloud.cfg.tmpl (#1887) This commit adds Rocky Linux, AlmaLinux, CloudLinux, EuroLinux, Miracle Linux, and 
Virtuozzo to the is_rhel list. Recent downstream patch from Red Hat causes issues with RHEL derivatives with the cloud.cfg template, which leads to derivatives having to make small changes to bring back expected functionality. --- config/cloud.cfg.tmpl | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 0f234a7d..fdd5a357 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -3,7 +3,8 @@ # The top level settings are used as module # and base configuration. {% set is_bsd = variant in ["dragonfly", "freebsd", "netbsd", "openbsd"] %} -{% set is_rhel = variant in ["rhel", "centos"] %} +{% set is_rhel = variant in ["almalinux", "centos", "cloudlinux", "eurolinux", + "miraclelinux", "rhel", "rocky", "virtuozzo" ] %} {% if is_bsd %} syslog_fix_perms: root:wheel {% elif variant in ["suse"] %} @@ -34,8 +35,7 @@ disable_root: false disable_root: true {% endif %} -{% if variant in ["almalinux", "alpine", "amazon", "cloudlinux", "eurolinux", - "fedora", "miraclelinux", "openEuler", "openmandriva", "rocky", "virtuozzo"] or is_rhel %} +{% if variant in ["alpine", "amazon", "fedora", "openEuler", "openmandriva"] or is_rhel %} {% if is_rhel %} mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service,_netdev', '0', '2'] {% else %} @@ -197,9 +197,9 @@ cloud_final_modules: # (not accessible to handlers/transforms) system_info: # This will affect which distro class gets used -{% if variant in ["almalinux", "alpine", "amazon", "arch", "cloudlinux", "debian", - "eurolinux", "fedora", "freebsd", "gentoo", "netbsd", "mariner", "miraclelinux", "openbsd", "openEuler", - "openmandriva", "photon", "rocky", "suse", "ubuntu", "virtuozzo"] or is_rhel %} +{% if variant in ["alpine", "amazon", "arch", "debian", "fedora", "freebsd", + "gentoo", "netbsd", "mariner", "openbsd", "openEuler", + "openmandriva", "photon", "suse", "ubuntu"] or is_rhel %} distro: {{ variant }} 
{% elif variant in ["dragonfly"] %} distro: dragonflybsd @@ -252,15 +252,15 @@ system_info: primary: http://ports.ubuntu.com/ubuntu-ports security: http://ports.ubuntu.com/ubuntu-ports ssh_svcname: ssh -{% elif variant in ["almalinux", "alpine", "amazon", "arch", "cloudlinux", "eurolinux", - "fedora", "gentoo", "miraclelinux", "openEuler", "openmandriva", "rocky", "suse", "virtuozzo"] or is_rhel %} +{% elif variant in ["alpine", "amazon", "arch", "fedora", + "gentoo", "openEuler", "openmandriva", "suse"] or is_rhel %} # Default user name + that default users groups (if added/used) default_user: {% if variant == "amazon" %} name: ec2-user lock_passwd: True gecos: EC2 Default User -{% elif is_rhel %} +{% elif variant in ["rhel", "centos"] %} name: cloud-user lock_passwd: true gecos: Cloud User -- cgit v1.2.1 From b12342eb64251bbf2c97514ffcee6eb7b63a0894 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mina=20Gali=C4=87?= Date: Wed, 7 Dec 2022 01:15:41 +0000 Subject: Fix exception in BSD networking code-path (#1894) overriding __init__() means we need to call super().__init__() Sponsored By: FreeBSD Foundation --- cloudinit/distros/networking.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloudinit/distros/networking.py b/cloudinit/distros/networking.py index 7edfe965..28ee1b43 100644 --- a/cloudinit/distros/networking.py +++ b/cloudinit/distros/networking.py @@ -190,6 +190,7 @@ class BSDNetworking(Networking): self.ifc = ifconfig.Ifconfig() self.ifs = {} self._update_ifs() + super().__init__() def _update_ifs(self): ifconf = subp.subp(["ifconfig", "-a"]) -- cgit v1.2.1 From aecdcbf86fbf20285da5611513ced3183fb8205a Mon Sep 17 00:00:00 2001 From: einsibjarni Date: Wed, 7 Dec 2022 10:38:10 +0000 Subject: Add support for setting uid when creating users on FreeBSD (#1888) * Add support for setting uid when creating users on FreeBSD * Test if uid in config is used when creating user in FreeBSD * Merge two branches in if statement. 
Unit test uid in pw call for create user on freebsd. --- cloudinit/distros/freebsd.py | 5 ++-- tests/unittests/config/test_cc_users_groups.py | 6 +++-- tests/unittests/distros/test_freebsd.py | 34 ++++++++++++++++++++++++++ 3 files changed, 41 insertions(+), 4 deletions(-) diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index b9fd37b8..4268abe6 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -76,6 +76,7 @@ class Distro(cloudinit.distros.bsd.BSD): "groups": "-G", "shell": "-s", "inactive": "-E", + "uid": "-u", } pw_useradd_flags = { "no_user_group": "--no-user-group", @@ -84,8 +85,8 @@ class Distro(cloudinit.distros.bsd.BSD): } for key, val in kwargs.items(): - if key in pw_useradd_opts and val and isinstance(val, str): - pw_useradd_cmd.extend([pw_useradd_opts[key], val]) + if key in pw_useradd_opts and val and isinstance(val, (str, int)): + pw_useradd_cmd.extend([pw_useradd_opts[key], str(val)]) elif key in pw_useradd_flags and val: pw_useradd_cmd.append(pw_useradd_flags[key]) diff --git a/tests/unittests/config/test_cc_users_groups.py b/tests/unittests/config/test_cc_users_groups.py index 00eca93b..6067f8e9 100644 --- a/tests/unittests/config/test_cc_users_groups.py +++ b/tests/unittests/config/test_cc_users_groups.py @@ -87,7 +87,9 @@ class TestHandleUsersGroups(CiTestCase): m_linux_group, ): """When users in config, create users with freebsd.create_user.""" - cfg = {"users": ["default", {"name": "me2"}]} # merged cloud-config + cfg = { + "users": ["default", {"name": "me2", "uid": 1234}] + } # merged cloud-config # System config defines a default user for the distro. 
sys_cfg = { "default_user": { @@ -115,7 +117,7 @@ class TestHandleUsersGroups(CiTestCase): lock_passwd=True, shell="/bin/tcsh", ), - mock.call("me2", default=False), + mock.call("me2", uid=1234, default=False), ], ) m_fbsd_group.assert_not_called() diff --git a/tests/unittests/distros/test_freebsd.py b/tests/unittests/distros/test_freebsd.py index 22be5098..70f2c7c6 100644 --- a/tests/unittests/distros/test_freebsd.py +++ b/tests/unittests/distros/test_freebsd.py @@ -2,9 +2,43 @@ import os +from cloudinit.distros.freebsd import Distro, FreeBSDNetworking from cloudinit.util import find_freebsd_part, get_path_dev_freebsd +from tests.unittests.distros import _get_distro from tests.unittests.helpers import CiTestCase, mock +M_PATH = "cloudinit.distros.freebsd." + + +class TestFreeBSD: + @mock.patch(M_PATH + "subp.subp") + def test_add_user(self, m_subp, mocker): + mocker.patch.object(Distro, "networking_cls", spec=FreeBSDNetworking) + distro = _get_distro("freebsd") + distro.add_user("me2", uid=1234, default=False) + assert [ + mock.call( + [ + "pw", + "useradd", + "-n", + "me2", + "-u", + "1234", + "-d/usr/home/me2", + "-m", + ], + logstring=[ + "pw", + "useradd", + "-n", + "me2", + "-d/usr/home/me2", + "-m", + ], + ) + ] == m_subp.call_args_list + class TestDeviceLookUp(CiTestCase): @mock.patch("cloudinit.subp.subp") -- cgit v1.2.1 From c273d0e9f262d61b275369ce5587151b0df0dfe6 Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Wed, 7 Dec 2022 15:52:29 +0100 Subject: lint: fix tip-flake8 and tip-mypy (#1896) --- cloudinit/config/cc_ansible.py | 2 +- cloudinit/config/schema.py | 2 +- cloudinit/net/activators.py | 2 +- cloudinit/net/ephemeral.py | 6 +++--- cloudinit/net/network_state.py | 2 +- cloudinit/stages.py | 2 +- cloudinit/url_helper.py | 8 ++++---- pyproject.toml | 1 + tests/integration_tests/modules/test_combined.py | 4 ++-- tests/unittests/config/test_cc_ansible.py | 2 +- 10 files changed, 16 insertions(+), 15 deletions(-) diff --git 
a/cloudinit/config/cc_ansible.py b/cloudinit/config/cc_ansible.py index d8fee517..876dbc6b 100644 --- a/cloudinit/config/cc_ansible.py +++ b/cloudinit/config/cc_ansible.py @@ -132,7 +132,7 @@ class AnsiblePullPip(AnsiblePull): if not self.is_installed(): # bootstrap pip if required try: - import pip # type: ignore # noqa: F401 + import pip # noqa: F401 except ImportError: self.distro.install_packages(self.distro.pip_package_name) cmd = [sys.executable, "-m", "pip", "install"] diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index 42792985..88590ace 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -382,7 +382,7 @@ def validate_cloudconfig_metaschema(validator, schema: dict, throw=True): def validate_cloudconfig_schema( config: dict, - schema: dict = None, + schema: Optional[dict] = None, strict: bool = False, strict_metaschema: bool = False, log_details: bool = True, diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py index b6af3770..7d11a02c 100644 --- a/cloudinit/net/activators.py +++ b/cloudinit/net/activators.py @@ -97,7 +97,7 @@ class IfUpDownActivator(NetworkActivator): # E.g., NetworkManager has a ifupdown plugin that requires the name # of a specific connection. 
@staticmethod - def available(target: str = None) -> bool: + def available(target: Optional[str] = None) -> bool: """Return true if ifupdown can be used on this system.""" return eni_available(target=target) diff --git a/cloudinit/net/ephemeral.py b/cloudinit/net/ephemeral.py index 5ce41694..1dfde6e0 100644 --- a/cloudinit/net/ephemeral.py +++ b/cloudinit/net/ephemeral.py @@ -4,7 +4,7 @@ """ import contextlib import logging -from typing import Any, Dict, List +from typing import Any, Dict, List, Optional import cloudinit.net as net from cloudinit import subp @@ -35,7 +35,7 @@ class EphemeralIPv4Network: prefix_or_mask, broadcast, router=None, - connectivity_url_data: Dict[str, Any] = None, + connectivity_url_data: Optional[Dict[str, Any]] = None, static_routes=None, ): """Setup context manager and validate call signature. @@ -313,7 +313,7 @@ class EphemeralDHCPv4: def __init__( self, iface=None, - connectivity_url_data: Dict[str, Any] = None, + connectivity_url_data: Optional[Dict[str, Any]] = None, dhcp_log_func=None, tmp_dir=None, ): diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index e0adb110..36cd582e 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -251,7 +251,7 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta): self, version=NETWORK_STATE_VERSION, config=None, - renderer=None, # type: Optional[Renderer] + renderer: "Optional[Renderer]" = None, ): self._version = version self._config = config diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 635a31e8..56e9774a 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -43,7 +43,7 @@ def update_event_enabled( datasource: sources.DataSource, cfg: dict, event_source_type: EventType, - scope: EventScope = None, + scope: Optional[EventScope] = None, ) -> bool: """Determine if a particular EventType is enabled. 
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 3d4e4639..d6d0afa6 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -375,7 +375,7 @@ def _run_func_with_delay( addr: str, timeout: int, event: threading.Event, - delay: float = None, + delay: Optional[float] = None, ) -> Any: """Execute func with optional delay""" if delay: @@ -476,11 +476,11 @@ def wait_for_url( max_wait=None, timeout=None, status_cb: Callable = LOG.debug, # some sources use different log levels - headers_cb: Callable = None, + headers_cb: Optional[Callable] = None, headers_redact=None, sleep_time: int = 1, - exception_cb: Callable = None, - sleep_time_cb: Callable[[Any, int], int] = None, + exception_cb: Optional[Callable] = None, + sleep_time_cb: Optional[Callable[[Any, int], int]] = None, request_method: str = "", connect_synchronously: bool = True, async_delay: float = 0.150, diff --git a/pyproject.toml b/pyproject.toml index d566b4a2..1a3fc176 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,6 +25,7 @@ module = [ "jsonpatch", "netifaces", "paramiko.*", + "pip.*", "pycloudlib.*", "responses", "serial", diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py index 32537729..cf5cb199 100644 --- a/tests/integration_tests/modules/test_combined.py +++ b/tests/integration_tests/modules/test_combined.py @@ -154,8 +154,8 @@ class TestCombined: def test_snap(self, class_client: IntegrationInstance): """Integration test for the snap module. - This test specifies a command to be executed by the ``snap`` module - and then checks that if that command was executed during boot. + This test verify that the snap packages specified in the user-data + were installed by the ``snap`` module during boot. 
""" client = class_client snap_output = client.execute("snap list") diff --git a/tests/unittests/config/test_cc_ansible.py b/tests/unittests/config/test_cc_ansible.py index a0d6bcab..bd8ec9bf 100644 --- a/tests/unittests/config/test_cc_ansible.py +++ b/tests/unittests/config/test_cc_ansible.py @@ -18,7 +18,7 @@ from tests.unittests.helpers import skipUnlessJsonSchema from tests.unittests.util import get_cloud try: - import pip as _pip # type: ignore # noqa: F401 + import pip as _pip # noqa: F401 HAS_PIP = True except ImportError: -- cgit v1.2.1 From 9446bdbd4d4f3ecf8cf14fde589373d860c5a2e4 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Wed, 7 Dec 2022 11:11:25 -0700 Subject: doc: add qemu tutorial (#1863) Guide the user through setting up a virtual machine. Introduce commonly used terminology and architecture. Include a debugging page and example script to guide the audience through pitfalls. --- doc/rtd/index.rst | 4 +- doc/rtd/topics/modules.rst | 3 + doc/rtd/topics/tutorial.rst | 140 ------------- doc/rtd/topics/tutorials/lxd.rst | 149 ++++++++++++++ doc/rtd/topics/tutorials/qemu-debugging.rst | 41 ++++ doc/rtd/topics/tutorials/qemu-script.sh | 47 +++++ doc/rtd/topics/tutorials/qemu.rst | 295 ++++++++++++++++++++++++++++ 7 files changed, 537 insertions(+), 142 deletions(-) delete mode 100644 doc/rtd/topics/tutorial.rst create mode 100644 doc/rtd/topics/tutorials/lxd.rst create mode 100644 doc/rtd/topics/tutorials/qemu-debugging.rst create mode 100755 doc/rtd/topics/tutorials/qemu-script.sh create mode 100644 doc/rtd/topics/tutorials/qemu.rst diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst index fd062e23..c660c02b 100644 --- a/doc/rtd/index.rst +++ b/doc/rtd/index.rst @@ -34,7 +34,7 @@ projects, contributions, suggestions, fixes and constructive feedback. Having trouble? We would like to help! 
************************************** -- Check out the :ref:`lxd_tutorial` if you're new to cloud-init +- Check out the :ref:`tutorial_lxd` if you're new to cloud-init - Try the :ref:`FAQ` for answers to some common questions - You can also search the cloud-init `mailing list archive`_ - Find a bug? `Report bugs on Launchpad`_ @@ -44,7 +44,7 @@ Having trouble? We would like to help! :titlesonly: :caption: Getting Started - topics/tutorial.rst + topics/tutorials/lxd.rst topics/availability.rst topics/boot.rst topics/cli.rst diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst index 0274d7bc..20ca61d5 100644 --- a/doc/rtd/topics/modules.rst +++ b/doc/rtd/topics/modules.rst @@ -61,6 +61,9 @@ Module Reference .. automodule:: cloudinit.config.cc_ubuntu_drivers .. automodule:: cloudinit.config.cc_update_etc_hosts .. automodule:: cloudinit.config.cc_update_hostname + +.. _mod-users_groups: + .. automodule:: cloudinit.config.cc_users_groups .. automodule:: cloudinit.config.cc_wireguard .. automodule:: cloudinit.config.cc_write_files diff --git a/doc/rtd/topics/tutorial.rst b/doc/rtd/topics/tutorial.rst deleted file mode 100644 index aadbe45b..00000000 --- a/doc/rtd/topics/tutorial.rst +++ /dev/null @@ -1,140 +0,0 @@ -.. _lxd_tutorial: - -Tutorial -******** - -In this tutorial, we will create our first cloud-init user data script -and deploy it into an LXD container. We'll be using LXD_ for this tutorial -because it provides first class support for cloud-init user data as well as -systemd support. Because it is container based, it allows for quick -testing and iterating on our user data definition. - -Setup LXD -========= - -Skip this section if you already have LXD_ setup. - -Install LXD ------------ - -.. code-block:: shell-session - - $ sudo snap install lxd - -If you don't have snap, you can install LXD using one of the -`other installation options`_. - -Initialize LXD --------------- - -.. 
code-block:: shell-session - - $ lxd init --minimal - -The minimal configuration should work fine for our purposes. It can always -be changed at a later time if needed. - -Define our user data -==================== - -Now that LXD is setup, we can define our user data. Create the -following file on your local filesystem at ``/tmp/my-user-data``: - -.. code-block:: yaml - - #cloud-config - runcmd: - - echo 'Hello, World!' > /var/tmp/hello-world.txt - -Here we are defining our cloud-init user data in the -:ref:`cloud-config` format, using the -:ref:`runcmd module ` to define a command to run. When applied, it -should write ``Hello, World!`` to ``/var/tmp/hello-world.txt``. - -Launch a container with our user data -===================================== - -Now that we have LXD setup and our user data defined, we can launch an -instance with our user data: - -.. code-block:: shell-session - - $ lxc launch ubuntu:focal my-test --config=user.user-data="$(cat /tmp/my-user-data)" - -Verify that cloud-init ran successfully -======================================= - -After launching the container, we should be able to connect -to our instance using - -.. code-block:: shell-session - - $ lxc shell my-test - -You should now be in a shell inside the LXD instance. -Before validating the user data, let's wait for cloud-init to complete -successfully: - -.. code-block:: shell-session - - $ cloud-init status --wait - ..... - cloud-init status: done - $ - -We can now verify that cloud-init received the expected user data: - -.. code-block:: shell-session - - $ cloud-init query userdata - #cloud-config - runcmd: - - echo 'Hello, World!' > /var/tmp/hello-world.txt - -We can also assert the user data we provided is a valid cloud-config: - -.. code-block:: shell-session - - $ cloud-init schema --system --annotate - Valid cloud-config: system userdata - $ - -Finally, verify that our user data was applied successfully: - -.. 
code-block:: shell-session - - $ cat /var/tmp/hello-world.txt - Hello, World! - $ - -We can see that cloud-init has consumed our user data successfully! - -Tear down -========= - -Exit the container shell (i.e., using ``exit`` or ctrl-d). Once we have -exited the container, we can stop the container using: - -.. code-block:: shell-session - - $ lxc stop my-test - -and we can remove the container using: - -.. code-block:: shell-session - - $ lxc rm my-test - -What's next? -============ - -In this tutorial, we used the :ref:`runcmd module ` to execute a -shell command. The full list of modules available can be found in -:ref:`modules documentation`. -Each module contains examples of how to use it. - -You can also head over to the :ref:`examples` page for -examples of more common use cases. - -.. _LXD: https://linuxcontainers.org/lxd/ -.. _other installation options: https://linuxcontainers.org/lxd/getting-started-cli/#other-installation-options diff --git a/doc/rtd/topics/tutorials/lxd.rst b/doc/rtd/topics/tutorials/lxd.rst new file mode 100644 index 00000000..7ffc80cc --- /dev/null +++ b/doc/rtd/topics/tutorials/lxd.rst @@ -0,0 +1,149 @@ +.. _tutorial_lxd: + +Tutorials +********* + +.. toctree:: + :titlesonly: + :hidden: + + qemu.rst + +LXD +=== + +In this tutorial, we will create our first cloud-init user data script +and deploy it into an LXD container. We'll be using LXD_ for this tutorial +because it provides first class support for cloud-init user data as well as +systemd support. Because it is container based, it allows for quick +testing and iterating on our user data definition. + +Setup LXD +========= + +Skip this section if you already have LXD_ setup. + +Install LXD +----------- + +.. code-block:: shell-session + + $ sudo snap install lxd + +If you don't have snap, you can install LXD using one of the +`other installation options`_. + +Initialize LXD +-------------- + +.. 
code-block:: shell-session + + $ lxd init --minimal + +The minimal configuration should work fine for our purposes. It can always +be changed at a later time if needed. + +Define our user data +==================== + +Now that LXD is setup, we can define our user data. Create the +following file on your local filesystem at ``/tmp/my-user-data``: + +.. code-block:: yaml + + #cloud-config + runcmd: + - echo 'Hello, World!' > /var/tmp/hello-world.txt + +Here we are defining our cloud-init user data in the +:ref:`cloud-config` format, using the +:ref:`runcmd module ` to define a command to run. When applied, it +should write ``Hello, World!`` to ``/var/tmp/hello-world.txt``. + +Launch a container with our user data +===================================== + +Now that we have LXD setup and our user data defined, we can launch an +instance with our user data: + +.. code-block:: shell-session + + $ lxc launch ubuntu:focal my-test --config=user.user-data="$(cat /tmp/my-user-data)" + +Verify that cloud-init ran successfully +======================================= + +After launching the container, we should be able to connect +to our instance using + +.. code-block:: shell-session + + $ lxc shell my-test + +You should now be in a shell inside the LXD instance. +Before validating the user data, let's wait for cloud-init to complete +successfully: + +.. code-block:: shell-session + + $ cloud-init status --wait + ..... + cloud-init status: done + $ + +We can now verify that cloud-init received the expected user data: + +.. code-block:: shell-session + + $ cloud-init query userdata + #cloud-config + runcmd: + - echo 'Hello, World!' > /var/tmp/hello-world.txt + +We can also assert the user data we provided is a valid cloud-config: + +.. code-block:: shell-session + + $ cloud-init schema --system --annotate + Valid cloud-config: system userdata + $ + +Finally, verify that our user data was applied successfully: + +.. 
code-block:: shell-session + + $ cat /var/tmp/hello-world.txt + Hello, World! + $ + +We can see that cloud-init has consumed our user data successfully! + +Tear down +========= + +Exit the container shell (i.e., using ``exit`` or ctrl-d). Once we have +exited the container, we can stop the container using: + +.. code-block:: shell-session + + $ lxc stop my-test + +and we can remove the container using: + +.. code-block:: shell-session + + $ lxc rm my-test + +What's next? +============ + +In this tutorial, we used the :ref:`runcmd module ` to execute a +shell command. The full list of modules available can be found in +:ref:`modules documentation`. +Each module contains examples of how to use it. + +You can also head over to the :ref:`examples` page for +examples of more common use cases. + +.. _LXD: https://linuxcontainers.org/lxd/ +.. _other installation options: https://linuxcontainers.org/lxd/getting-started-cli/#other-installation-options diff --git a/doc/rtd/topics/tutorials/qemu-debugging.rst b/doc/rtd/topics/tutorials/qemu-debugging.rst new file mode 100644 index 00000000..845f9fcd --- /dev/null +++ b/doc/rtd/topics/tutorials/qemu-debugging.rst @@ -0,0 +1,41 @@ +.. _qemu_debug_info: + +Qemu tutorial debugging +*********************** + +You may wish to test out the commands in this tutorial as a +:download:`script` to check for copy-paste mistakes. + +If you successfully launched the virtual machine, but couldn't log in, +there are a few places to check to debug your setup. + +To debug, answer the following questions: + +Did cloud-init discover the IMDS webserver? +=========================================== + +The webserver should print a message in the terminal for each request it +receives. If it didn't print out any messages when the virtual machine booted, +then cloud-init was unable to obtain the config. Make sure that the webserver +can be locally accessed using ``curl`` or ``wget``. + +.. 
code-block:: sh + + $ curl 0.0.0.0:8000/user-data + $ curl 0.0.0.0:8000/meta-data + $ curl 0.0.0.0:8000/vendor-data + + +Did the IMDS webserver serve the expected files? +================================================================ + +If the webserver prints out 404 errors when launching Qemu, then first check +that you started the server in the temp directory. + +Were the configurations inside the file correct? +=================================================== +When launching Qemu, if the webserver shows that it succeeded in serving +``user-data``, ``meta-data``, and ``vendor-data``, but you cannot log in, then +you may have provided incorrect cloud-config files. If you can mount a copy of +the virtual machine's filesystem locally to inspect the logs, it should be +possible to get clues about what went wrong. diff --git a/doc/rtd/topics/tutorials/qemu-script.sh b/doc/rtd/topics/tutorials/qemu-script.sh new file mode 100755 index 00000000..19a2cf85 --- /dev/null +++ b/doc/rtd/topics/tutorials/qemu-script.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +TEMP_DIR=temp +IMAGE_URL="https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img" + +# setup +mkdir "$TEMP_DIR" && cd "$TEMP_DIR" || { + echo "Error: Failed to create directory [$TEMP_DIR], aborting early" + exit 1 +} + +wget "$IMAGE_URL" + +# Create user-data, vendor-data, meta-data +cat << EOF > user-data +#cloud-config +password: password +chpasswd: + expire: False +EOF + +cat << EOF > meta-data +instance-id: someid/somehostname +local-hostname: jammy +EOF + +touch vendor-data + +# start ad hoc imds webserver +python3 -m http.server --directory . 
& + +# start an instance of your image in a virtual machine +qemu-system-x86_64 \ + -net nic \ + -net user \ + -machine accel=kvm:tcg \ + -cpu host \ + -m 512 \ + -nographic \ + -hda jammy-server-cloudimg-amd64.img \ + -smbios type=1,serial=ds='nocloud-net;s=http://10.0.2.2:8000/' + +echo -e "\nTo reuse the image and config files, start the python webserver and " +echo -e "virtual machine from $(pwd), which contains these files:\n$(ls -1)\n" + +# end the python server on exit +trap "trap - SIGTERM && kill -- -$$" EXIT diff --git a/doc/rtd/topics/tutorials/qemu.rst b/doc/rtd/topics/tutorials/qemu.rst new file mode 100644 index 00000000..bf36b3b9 --- /dev/null +++ b/doc/rtd/topics/tutorials/qemu.rst @@ -0,0 +1,295 @@ +.. _tutorial_qemu: + +Qemu Tutorial +************* + +.. toctree:: + :titlesonly: + :hidden: + + qemu-debugging.rst + + + +In this tutorial, we will demonstrate launching an Ubuntu cloud image in a +virtual machine that uses cloud-init to pre-configure the system during boot. + +The goal of this tutorial is to provide a minimal demonstration of cloud-init +that you can use as a development environment to test cloud-init +configurations locally prior to launching in the cloud. + + +Why Qemu? +========= + +Qemu_ is a cross-platform emulator capable of running performant virtual +machines. Qemu is used at the core of a broad range of production operating +system deployments and open source software projects (including libvirt, LXD, +and vagrant) and is capable of running Windows, Linux, and Unix guest operating +systems. While Qemu is flexible and feature-rich, we are using it because of +the broad support it has due to its broad adoption and ability to run on +\*nix-derived operating systems. + + +What is an IMDS? +================ + +Instance Metadata Service is a service provided by most cloud providers as a +means of providing information to virtual machine instances. This service is +used by cloud providers to expose information to a virtual machine. 
This +service is used for many different things, and is the primary mechanism for +some clouds to expose cloud-init configuration data to the instance. + + +How does cloud-init use the IMDS? +================================= + +The IMDS uses a private http webserver to provide metadata to each operating +system instance. During early boot, cloud-init sets up network access and +queries this webserver to gather configuration data. This allows cloud-init to +configure your operating system while it boots. + +In this tutorial we emulate this workflow using Qemu and a simple python +webserver. This workflow may be suitable for developing and testing cloud-init +configurations prior to cloud deployments. + + +How to use this tutorial +======================== + +In this tutorial each code block is to be copied and pasted directly +into the terminal then executed. Omit the prompt ``$`` before each command. + +Each code block is preceded by a description of what the command does. + + +Install Qemu +============ + +.. code-block:: sh + + $ sudo apt install qemu-system-x86 + +If you are not using Ubuntu, you can visit Qemu's `install instructions`_ for +additional information. + + +Create a temporary directory +============================ + +This directory will store our cloud image and configuration files for +:ref:`user-data`, :ref:`meta-data`, and +:ref:`vendor-data` + +This tutorial expects that you run all commands from this temporary +directory. Failure to do so will result in an unconfigured virtual +machine. + +Create a temporary directory and make it your current working directory with +``cd``. + +.. code-block:: sh + + $ mkdir temp + $ cd temp + + +Download a cloud image +====================== + +Cloud images typically come with cloud-init pre-installed and configured to run +on first boot. Users should not need to worry about installing cloud-init +unless they are manually creating their own images. In this case we select the +latest Ubuntu LTS_. 
+ +Download the server image using ``wget``. + +.. code-block:: sh + + $ wget https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img + + +Define our user data +==================== + +Create the following file ``user-data``. This user-data cloud-config +sets the password of the default user and sets it to never expire. For +more details see this module_. + +Execute the following command, which creates a file named ``user-data`` with +configuration data. + +.. code-block:: sh + + $ cat << EOF > user-data + #cloud-config + password: password + chpasswd: + expire: False + + EOF + + +What is user data? +================== + +Before moving forward, let's inspect our user data file. + +.. code-block:: sh + + $ cat user-data + +You should see the following contents: + +.. code-block:: yaml + + #cloud-config + password: password + chpasswd: + expire: False + +The first line starts with ``#cloud-config``, which tells cloud-init +which type of user-data is in the config. Cloud-config is a YAML-based +configuration type that tells cloud-init how to configure the virtual machine +instance. Multiple different format types are supported by cloud-init. See the +:ref:`documentation describing different formats`. + +The second line, ``password: password``, per :ref:`the docs`, +sets the default user's password to ``password``. + +The third and fourth lines direct cloud-init to not require password reset on +first login. + +Define our meta data +==================== + +Execute the following command, which creates a file named ``meta-data`` with +configuration data. + +.. code-block:: sh + + $ cat << EOF > meta-data + instance-id: someid/somehostname + local-hostname: jammy + + EOF + + +Define our vendor data +====================== + +Now create the empty file ``vendor-data`` in your temporary directory. This +will speed up the retry wait time. + +.. 
code-block:: sh + + $ touch vendor-data + + +Start an ad hoc IMDS webserver +============================== + +In a second terminal, change to your temporary directory and then start the +python webserver (built-in to python). + +.. code-block:: sh + + $ cd temp + $ python3 -m http.server --directory . + + +Launch a virtual machine with our user data +=========================================== + +Switch back to your original terminal so we can launch the virtual machine. +By default, Qemu will print the kernel logs and systemd logs to the terminal +while the operating system boots. This may take a few moments to complete. + +If the output stopped scrolling but you don't see a prompt yet, press ``enter`` +to get to login prompt. + +.. code-block:: sh + + $ qemu-system-x86_64 \ + -net nic \ + -net user \ + -machine accel=kvm:tcg \ + -cpu host \ + -m 512 \ + -nographic \ + -hda jammy-server-cloudimg-amd64.img \ + -smbios type=1,serial=ds='nocloud-net;s=http://10.0.2.2:8000/' + +How is Qemu configured for cloud-init? +====================================== + +When launching Qemu, machine configuration is specified on the command +line. Many things may be configured: memory size, graphical output, networking +information, hard drives and more. + +Examine the last two lines of this command. This one, +``-hda jammy-server-cloudimg-amd64.img``, tells qemu to use the cloud +image as a virtual hard drive. This will cause the virtual machine to +boot Ubuntu which already has cloud-init installed. + +The last line tells cloud-init where it can find user-data using the +:ref:`NoCloud datasource`. During boot cloud-init checks +the ``SMBIOS`` serial number for `ds=nocloud-net`. If found, cloud-init will +use the specified URL to source its userdata config files. 
In this case we use +the default gateway of the virtual machine (``10.0.2.2``) and default port +number of the python webserver (``8000``), so that cloud-init in the virtual +machine will query the server running on host. + +Verify that cloud-init ran successfully +======================================= + +After launching the virtual machine we should be able to connect to our +instance using the default distro username. + +In this case the default username is ``ubuntu`` and the password we configured +is ``password``. + +If you can log in using the configured password, it worked! + +If you couldn't log in, see +:ref:`this page for debug information`. + + +Check cloud-init status +======================= + +.. code-block:: sh + + $ cloud-init status --wait + +If you see ``status: done`` in the output, it succeeded! + +If you see a failed status, you'll want to check ``/var/log/cloud-init.log`` +for warning/error messages. + + +Tear down +========= + +Exit the Qemu shell using ``ctrl-a x`` (that's ``ctrl`` and ``a`` +simultaneously, followed by ``x``). + +Stop the python webserver that was started in a different terminal +(``ctrl-c``). + + +What's next? +============ + +In this tutorial, we configured the default user's password. +The full list of modules available can be found in +:ref:`modules documentation`. +The documentation for each module contains examples of how to use it. + +You can also head over to the :ref:`examples` page for +examples of more common use cases. + +.. _Qemu: https://www.qemu.org +.. _module: https://cloudinit.readthedocs.io/en/latest/topics/modules.html#set-passwords +.. _install instructions: https://www.qemu.org/download/#linux +.. 
_LTS: https://wiki.ubuntu.com/Releases -- cgit v1.2.1 From f75be2ebe15b0dc78092fe47b1ef8d506607e9da Mon Sep 17 00:00:00 2001 From: Nigel Kukard Date: Wed, 7 Dec 2022 23:29:52 +0000 Subject: networkd: Add support for multiple [Route] sections (#1868) Networkd supports multiple [Route] sections within the same file. Currently all [Route] section tags are squashed into one and if there is a default gateway it means defining a device route is not possible as the target is set to the default gateway. This patch adds support for multiple [Route] sections allowing us to support device routes. This is done by tracking each route in the route list individually and ensuring the key-value pairs are maintained within their respective [Route] section. This both maintains backwards compatibility with previous behavior and allows the specification of routes with no destination IP, causing the destination to be added with a device target. --- cloudinit/net/networkd.py | 51 ++++++++++++++++++++++++++++---- tests/unittests/net/test_networkd.py | 57 +++++++++++++++++++++++++++++++++++- 2 files changed, 101 insertions(+), 7 deletions(-) diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py index 3591513f..4fd8a9b8 100644 --- a/cloudinit/net/networkd.py +++ b/cloudinit/net/networkd.py @@ -28,7 +28,7 @@ class CfgParser: "DHCPv4": [], "DHCPv6": [], "Address": [], - "Route": [], + "Route": {}, } ) @@ -40,6 +40,22 @@ class CfgParser: self.conf_dict[k] = list(dict.fromkeys(self.conf_dict[k])) self.conf_dict[k].sort() + def update_route_section(self, sec, rid, key, val): + """ + For each route section we use rid as a key, this allows us to isolate + this route from others on subsequent calls. 
+ """ + for k in self.conf_dict.keys(): + if k == sec: + if rid not in self.conf_dict[k]: + self.conf_dict[k][rid] = [] + self.conf_dict[k][rid].append(key + "=" + str(val)) + # remove duplicates from list + self.conf_dict[k][rid] = list( + dict.fromkeys(self.conf_dict[k][rid]) + ) + self.conf_dict[k][rid].sort() + def get_final_conf(self): contents = "" for k, v in sorted(self.conf_dict.items()): @@ -50,6 +66,12 @@ class CfgParser: contents += "[" + k + "]\n" contents += e + "\n" contents += "\n" + elif k == "Route": + for n in sorted(v): + contents += "[" + k + "]\n" + for e in sorted(v[n]): + contents += e + "\n" + contents += "\n" else: contents += "[" + k + "]\n" for e in sorted(v): @@ -112,7 +134,11 @@ class Renderer(renderer.Renderer): if "mtu" in iface and iface["mtu"]: cfg.update_section(sec, "MTUBytes", iface["mtu"]) - def parse_routes(self, conf, cfg: CfgParser): + def parse_routes(self, rid, conf, cfg: CfgParser): + """ + Parse a route and use rid as a key in order to isolate the route from + others in the route dict. 
+ """ sec = "Route" route_cfg_map = { "gateway": "Gateway", @@ -130,11 +156,12 @@ class Renderer(renderer.Renderer): continue if k == "network": v += prefix - cfg.update_section(sec, route_cfg_map[k], v) + cfg.update_route_section(sec, rid, route_cfg_map[k], v) def parse_subnets(self, iface, cfg: CfgParser): dhcp = "no" sec = "Network" + rid = 0 for e in iface.get("subnets", []): t = e["type"] if t == "dhcp4" or t == "dhcp": @@ -149,7 +176,10 @@ class Renderer(renderer.Renderer): dhcp = "yes" if "routes" in e and e["routes"]: for i in e["routes"]: - self.parse_routes(i, cfg) + # Use "r" as a dict key prefix for this route to isolate + # it from other sources of routes + self.parse_routes(f"r{rid}", i, cfg) + rid = rid + 1 if "address" in e: subnet_cfg_map = { "address": "Address", @@ -163,7 +193,12 @@ class Renderer(renderer.Renderer): v += "/" + str(e["prefix"]) cfg.update_section("Address", subnet_cfg_map[k], v) elif k == "gateway": - cfg.update_section("Route", subnet_cfg_map[k], v) + # Use "a" as a dict key prefix for this route to + # isolate it from other sources of routes + cfg.update_route_section( + "Route", f"a{rid}", subnet_cfg_map[k], v + ) + rid = rid + 1 elif k == "dns_nameservers" or k == "dns_search": cfg.update_section(sec, subnet_cfg_map[k], " ".join(v)) @@ -280,8 +315,12 @@ class Renderer(renderer.Renderer): dhcp = self.parse_subnets(iface, cfg) self.parse_dns(iface, cfg, ns) + rid = 0 for route in ns.iter_routes(): - self.parse_routes(route, cfg) + # Use "c" as a dict key prefix for this route to isolate it + # from other sources of routes + self.parse_routes(f"c{rid}", route, cfg) + rid = rid + 1 if ns.version == 2: name: Optional[str] = iface["name"] diff --git a/tests/unittests/net/test_networkd.py b/tests/unittests/net/test_networkd.py index 2958231b..bb781b98 100644 --- a/tests/unittests/net/test_networkd.py +++ b/tests/unittests/net/test_networkd.py @@ -195,10 +195,54 @@ Domains=rgrunbla.github.beta.tailscale.net [Route] Gateway=10.0.0.1 + 
+[Route] Gateway=2a01:4f8:10a:19d2::2 """ +V2_CONFIG_MULTI_SUBNETS = """ +network: + version: 2 + ethernets: + eth0: + addresses: + - 192.168.1.1/24 + - fec0::1/64 + gateway4: 192.168.254.254 + gateway6: "fec0::ffff" + routes: + - to: 169.254.1.1/32 + - to: "fe80::1/128" +""" + +V2_CONFIG_MULTI_SUBNETS_RENDERED = """\ +[Address] +Address=192.168.1.1/24 + +[Address] +Address=fec0::1/64 + +[Match] +Name=eth0 + +[Network] +DHCP=no + +[Route] +Gateway=192.168.254.254 + +[Route] +Gateway=fec0::ffff + +[Route] +Destination=169.254.1.1/32 + +[Route] +Destination=fe80::1/128 + +""" + class TestNetworkdRenderState: def _parse_network_state_from_config(self, config): @@ -307,5 +351,16 @@ class TestNetworkdRenderState: assert rendered_content["eth0"] == V1_CONFIG_MULTI_SUBNETS_RENDERED + def test_networkd_render_v2_multi_subnets(self): + """ + Ensure a device with multiple subnets gets correctly rendered. + + Per systemd-networkd docs, [Route] can only contain a single instance + of Gateway. + """ + with mock.patch("cloudinit.net.get_interfaces_by_mac"): + ns = self._parse_network_state_from_config(V2_CONFIG_MULTI_SUBNETS) + renderer = networkd.Renderer() + rendered_content = renderer._render_content(ns) -# vi: ts=4 expandtab + assert rendered_content["eth0"] == V2_CONFIG_MULTI_SUBNETS_RENDERED -- cgit v1.2.1 From 8ee0d21507f88241da1bc3e6ae954ead1eac0e0d Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 8 Dec 2022 08:49:46 -0600 Subject: Add back gateway4/6 deprecation to docs (#1898) It was accidentally removed in f1a9e44e. 
--- doc/rtd/topics/network-config-format-v2.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst index 53274417..b3e1df27 100644 --- a/doc/rtd/topics/network-config-format-v2.rst +++ b/doc/rtd/topics/network-config-format-v2.rst @@ -233,6 +233,7 @@ Example: ``addresses: [192.168.14.2/24, 2001:1::1/64]`` **gateway4**: or **gateway6**: *<(scalar)>* +Deprecated, see `netplan#default-routes`_. Set default gateway for IPv4/6, for manual address configuration. This requires setting ``addresses`` too. Gateway IPs must be in a form recognized by ``inet_pton(3)`` @@ -572,5 +573,6 @@ This is a complex example which shows most available features: :: dhcp4: yes .. _netplan: https://netplan.io +.. _netplan#default-routes: https://netplan.io/reference#default-routes .. _netplan#dhcp-overrides: https://netplan.io/reference#dhcp-overrides .. vi: textwidth=79 -- cgit v1.2.1 From 9329c531f2a0535cdac1b93b8cf8e1ff32c01484 Mon Sep 17 00:00:00 2001 From: Chris Patterson Date: Thu, 8 Dec 2022 14:23:36 -0800 Subject: sources/azure: encode health report as utf-8 (#1897) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If utf-8 characters are used in the report, it will fail to encode: azure.py[ERROR]: exception while reporting ready: 'latin-1' codec can't encode characters in position 392-397: Body ('乱写一些单词') is not valid Latin-1. Use body.encode('utf-8') if you want to send it encoded in UTF-8. Explicitly encode document as utf-8. 
Signed-off-by: Chris Patterson cpatterson@microsoft.com --- cloudinit/sources/helpers/azure.py | 16 ++++++----- tests/unittests/sources/test_azure_helper.py | 42 ++++++++++++++++++---------- 2 files changed, 37 insertions(+), 21 deletions(-) diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 56f44339..d3467769 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -13,7 +13,7 @@ from contextlib import contextmanager from datetime import datetime from errno import ENOENT from time import sleep, time -from typing import List, Optional, Union +from typing import Callable, List, Optional, TypeVar, Union from xml.etree import ElementTree from xml.sax.saxutils import escape @@ -50,8 +50,10 @@ DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE = ( "for more information on remediation." ) +T = TypeVar("T") -def azure_ds_telemetry_reporter(func): + +def azure_ds_telemetry_reporter(func: Callable[..., T]) -> Callable[..., T]: def impl(*args, **kwargs): with events.ReportEventStack( name=func.__name__, @@ -335,7 +337,7 @@ def http_with_retries( url: str, *, headers: dict, - data: Optional[str] = None, + data: Optional[bytes] = None, retry_sleep: int = 5, timeout_minutes: int = 20, ) -> url_helper.UrlResponse: @@ -440,7 +442,7 @@ class AzureEndpointHttpClient: return http_with_retries(url, headers=headers) def post( - self, url, data=None, extra_headers=None + self, url, data: Optional[bytes] = None, extra_headers=None ) -> url_helper.UrlResponse: headers = self.headers if extra_headers is not None: @@ -752,7 +754,7 @@ class GoalStateHealthReporter: status: str, substatus=None, description=None, - ) -> str: + ) -> bytes: health_detail = "" if substatus is not None: health_detail = self.HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format( @@ -770,10 +772,10 @@ class GoalStateHealthReporter: health_detail_subsection=health_detail, ) - return health_report + return health_report.encode("utf-8") 
@azure_ds_telemetry_reporter - def _post_health_report(self, document: str) -> None: + def _post_health_report(self, document: bytes) -> None: push_log_to_kvp() # Whenever report_diagnostic_event(diagnostic_msg) is invoked in code, diff --git a/tests/unittests/sources/test_azure_helper.py b/tests/unittests/sources/test_azure_helper.py index 0a41fedf..4714ca94 100644 --- a/tests/unittests/sources/test_azure_helper.py +++ b/tests/unittests/sources/test_azure_helper.py @@ -75,6 +75,23 @@ HEALTH_REPORT_XML_TEMPLATE = """\ """ + +def get_formatted_health_report_xml_bytes( + container_id: str, + incarnation: int, + instance_id: str, + health_status: str, + health_detail_subsection: str, +) -> bytes: + return HEALTH_REPORT_XML_TEMPLATE.format( + container_id=container_id, + incarnation=incarnation, + instance_id=instance_id, + health_status=health_status, + health_detail_subsection=health_detail_subsection, + ).encode("utf-8") + + HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent( """\
@@ -626,14 +643,11 @@ class TestGoalStateHealthReporter(CiTestCase): return element.text return None - def _get_formatted_health_report_xml_string(self, **kwargs): - return HEALTH_REPORT_XML_TEMPLATE.format(**kwargs) - def _get_formatted_health_detail_subsection_xml_string(self, **kwargs): return HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(**kwargs) def _get_report_ready_health_document(self): - return self._get_formatted_health_report_xml_string( + return get_formatted_health_report_xml_bytes( incarnation=escape(str(self.default_parameters["incarnation"])), container_id=escape(self.default_parameters["container_id"]), instance_id=escape(self.default_parameters["instance_id"]), @@ -651,7 +665,7 @@ class TestGoalStateHealthReporter(CiTestCase): ) ) - return self._get_formatted_health_report_xml_string( + return get_formatted_health_report_xml_bytes( incarnation=escape(str(self.default_parameters["incarnation"])), container_id=escape(self.default_parameters["container_id"]), instance_id=escape(self.default_parameters["instance_id"]), @@ -887,7 +901,7 @@ class TestGoalStateHealthReporter(CiTestCase): health_description=escape(health_description), ) ) - health_document = self._get_formatted_health_report_xml_string( + health_document = get_formatted_health_report_xml_bytes( incarnation=escape(incarnation), container_id=escape(container_id), instance_id=escape(instance_id), @@ -1132,9 +1146,9 @@ class TestWALinuxAgentShim(CiTestCase): posted_document = ( self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"] ) - self.assertIn(self.test_incarnation, posted_document) - self.assertIn(self.test_container_id, posted_document) - self.assertIn(self.test_instance_id, posted_document) + self.assertIn(self.test_incarnation.encode("utf-8"), posted_document) + self.assertIn(self.test_container_id.encode("utf-8"), posted_document) + self.assertIn(self.test_instance_id.encode("utf-8"), posted_document) def test_goal_state_values_used_for_report_failure(self): shim = 
wa_shim(endpoint="test_endpoint") @@ -1142,14 +1156,14 @@ class TestWALinuxAgentShim(CiTestCase): posted_document = ( self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"] ) - self.assertIn(self.test_incarnation, posted_document) - self.assertIn(self.test_container_id, posted_document) - self.assertIn(self.test_instance_id, posted_document) + self.assertIn(self.test_incarnation.encode("utf-8"), posted_document) + self.assertIn(self.test_container_id.encode("utf-8"), posted_document) + self.assertIn(self.test_instance_id.encode("utf-8"), posted_document) def test_xml_elems_in_report_ready_post(self): shim = wa_shim(endpoint="test_endpoint") shim.register_with_azure_and_fetch_data() - health_document = HEALTH_REPORT_XML_TEMPLATE.format( + health_document = get_formatted_health_report_xml_bytes( incarnation=escape(self.test_incarnation), container_id=escape(self.test_container_id), instance_id=escape(self.test_instance_id), @@ -1164,7 +1178,7 @@ class TestWALinuxAgentShim(CiTestCase): def test_xml_elems_in_report_failure_post(self): shim = wa_shim(endpoint="test_endpoint") shim.register_with_azure_and_report_failure(description="TestDesc") - health_document = HEALTH_REPORT_XML_TEMPLATE.format( + health_document = get_formatted_health_report_xml_bytes( incarnation=escape(self.test_incarnation), container_id=escape(self.test_container_id), instance_id=escape(self.test_instance_id), -- cgit v1.2.1 From 9bb3ee6bac10ef045925e18374df954c0ec3f784 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Thu, 8 Dec 2022 20:46:39 -0700 Subject: autoinstall: clarify docs for users Add autoinstall pointer to subiquity docs to FAQ. Remove cc_ubuntu_autoinstall from docs module definitions since it doesn't give users actionable information. 
--- doc/rtd/topics/faq.rst | 21 ++++++++++++++++++--- doc/rtd/topics/modules.rst | 1 - 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst index 8dae49e9..f92a22dd 100644 --- a/doc/rtd/topics/faq.rst +++ b/doc/rtd/topics/faq.rst @@ -151,9 +151,6 @@ provided to the system: $ cloud-init schema --system --annotate -As launching instances in the cloud can cost money and take a bit longer, -sometimes it is easier to launch instances locally using Multipass or LXD: - Why did cloud-init never complete? ================================== @@ -200,6 +197,22 @@ systemd, please make sure to include the following logs. $ systemctl --failed +Autoinstall, Preruncmd, Postruncmd +================================== +Since cloud-init ignores top level userdata cloud-config keys, other projects +such as `Juju `_ and subiquity_ +autoinstaller use a YAML-formatted config that combines cloud-init's userdata +cloud-config YAML format with their custom YAML keys. Since cloud-init ignores +unused top level keys, these combined YAML configurations may be valid +cloud-config files, however keys such as ``autoinstall``, ``preruncmd``, and +``postruncmd`` are not used by cloud-init to configure anything. + +Please direct bugs and questions about other projects that use cloud-init to +their respective support channels. For Subiquity autoinstaller that is via IRC +#ubuntu-server on Libera or Discourse. For Juju support see their +`discourse page `_. + + How can I make a module run on every boot? ========================================== Modules have a default frequency that can be overridden. This is done @@ -428,3 +441,5 @@ Whitepapers: .. _cloud-init Summit 2019: https://powersj.io/post/cloud-init-summit19/ .. _cloud-init Summit 2018: https://powersj.io/post/cloud-init-summit18/ .. _cloud-init Summit 2017: https://powersj.io/post/cloud-init-summit17/ +.. _subiquity: https://ubuntu.com/server/docs/install/autoinstall +.. 
_juju_project: https://discourse.charmhub.io/t/model-config-cloudinit-userdata/512 diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst index 20ca61d5..209cf6e8 100644 --- a/doc/rtd/topics/modules.rst +++ b/doc/rtd/topics/modules.rst @@ -57,7 +57,6 @@ Module Reference .. automodule:: cloudinit.config.cc_ssh_import_id .. automodule:: cloudinit.config.cc_timezone .. automodule:: cloudinit.config.cc_ubuntu_advantage -.. automodule:: cloudinit.config.cc_ubuntu_autoinstall .. automodule:: cloudinit.config.cc_ubuntu_drivers .. automodule:: cloudinit.config.cc_update_etc_hosts .. automodule:: cloudinit.config.cc_update_hostname -- cgit v1.2.1 From bbf200f3b2f89ab540a31fb61cf0e6a1bc1cfd07 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 8 Dec 2022 22:03:46 -0700 Subject: * net: netplan config root read-only as wifi config can contain creds On netplan systems, network v2 is passed directly though and written to /etc/netplan/50-cloud-init.yaml without validation. Current netplan configuration provides the ability to configure sensitive information such as `wifi:access-points:password`. Limit permissions for /etc/network/50-cloud-init.yaml as read-only for root (600). Since configuration or modification or netplan config needs to be performed by an admin user this permission restriction aligns with netplan tooling. Set root read-only only always and not just 'if' sensitive material exists within custom config because it will add confusion to have two expected modes for this file based on external conditions. 
--- cloudinit/net/netplan.py | 3 ++- tests/integration_tests/datasources/test_lxd_hotplug.py | 4 ++++ tests/integration_tests/modules/test_combined.py | 11 +++++++++++ tests/unittests/distros/test_netconfig.py | 5 +++-- 4 files changed, 20 insertions(+), 3 deletions(-) diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 28c08d6b..9612bcd5 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -260,7 +260,8 @@ class Renderer(renderer.Renderer): if not header.endswith("\n"): header += "\n" - util.write_file(fpnplan, header + content) + + util.write_file(fpnplan, header + content, mode=0o600) if self.clean_default: _clean_default(target=target) diff --git a/tests/integration_tests/datasources/test_lxd_hotplug.py b/tests/integration_tests/datasources/test_lxd_hotplug.py index 8c403e04..438f829b 100644 --- a/tests/integration_tests/datasources/test_lxd_hotplug.py +++ b/tests/integration_tests/datasources/test_lxd_hotplug.py @@ -148,6 +148,10 @@ class TestLxdHotplug: assert post_netplan == expected_netplan, client.read_from_file( "/var/log/cloud-init.log" ) + netplan_perms = client.execute( + "stat -c %a /etc/netplan/50-cloud-init.yaml" + ) + assert "600" == netplan_perms.stdout.strip() ip_info = json.loads(client.execute("ip --json address")) eth2s = [i for i in ip_info if i["ifname"] == "eth2"] assert len(eth2s) == 1 diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py index cf5cb199..845c7414 100644 --- a/tests/integration_tests/modules/test_combined.py +++ b/tests/integration_tests/modules/test_combined.py @@ -77,6 +77,17 @@ timezone: US/Aleutian @pytest.mark.ci @pytest.mark.user_data(USER_DATA) class TestCombined: + @pytest.mark.ubuntu # Because netplan + def test_netplan_permissions(self, class_client: IntegrationInstance): + """ + Test that netplan config file is generated with proper permissions + """ + response = class_client.execute( + "stat -c %a 
/etc/netplan/50-cloud-init.yaml" + ) + assert response.ok, "Unable to check perms on 50-cloud-init.yaml" + assert "600" == response.stdout.strip() + def test_final_message(self, class_client: IntegrationInstance): """Test that final_message module works as expected. diff --git a/tests/unittests/distros/test_netconfig.py b/tests/unittests/distros/test_netconfig.py index 236cc09f..8306bacd 100644 --- a/tests/unittests/distros/test_netconfig.py +++ b/tests/unittests/distros/test_netconfig.py @@ -569,7 +569,7 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): print(results[cfgpath]) print("----------") self.assertEqual(expected, results[cfgpath]) - self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + self.assertEqual(0o600, get_mode(cfgpath, tmpd)) def netplan_path(self): return "/etc/netplan/50-cloud-init.yaml" @@ -937,6 +937,7 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase): apply_fn(config, bringup) results = dir2dict(tmpd) + mode = 0o600 if with_netplan else 0o644 for cfgpath, expected in expected_cfgs.items(): print("----------") print(expected) @@ -944,7 +945,7 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase): print(results[cfgpath]) print("----------") self.assertEqual(expected, results[cfgpath]) - self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + self.assertEqual(mode, get_mode(cfgpath, tmpd)) def netctl_path(self, iface): return "/etc/netctl/%s" % iface -- cgit v1.2.1 From dc1d27bae63b51a925e40a80475ae45be62b3857 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 8 Dec 2022 22:04:19 -0700 Subject: pycloudlib: bump commit dropping azure api smoke test Avoid breakage on newer Azure SDK versions Bump commit to c9db5bfc --- integration-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-requirements.txt b/integration-requirements.txt index 1056f0e2..7e0465ac 100644 --- a/integration-requirements.txt +++ b/integration-requirements.txt @@ -1,5 +1,5 @@ # PyPI requirements for cloud-init integration testing 
# https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html # -pycloudlib @ git+https://github.com/canonical/pycloudlib.git@d76228e24d400937ba99cdb516460dd757dd3348 +pycloudlib @ git+https://github.com/canonical/pycloudlib.git@c9db5bfcf70ef61edff530f796f1e859eef90cf1 pytest -- cgit v1.2.1 From a6fd6ef3af0361c860de62d804dd584052f8a563 Mon Sep 17 00:00:00 2001 From: sxt1001 Date: Mon, 12 Dec 2022 23:43:59 +0800 Subject: Fix the distro.osfamily output problem in the openEuler system. (#1895) LP: #1999042 --- cloudinit/distros/__init__.py | 2 +- cloudinit/distros/openEuler.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 735a7832..e61320c1 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -57,7 +57,6 @@ OSFAMILIES = { "fedora", "mariner", "miraclelinux", - "openEuler", "openmandriva", "photon", "rhel", @@ -65,6 +64,7 @@ OSFAMILIES = { "virtuozzo", ], "suse": ["opensuse", "sles"], + "openEuler": ["openEuler"], } LOG = logging.getLogger(__name__) diff --git a/cloudinit/distros/openEuler.py b/cloudinit/distros/openEuler.py index 3dc0a342..92f1985d 100644 --- a/cloudinit/distros/openEuler.py +++ b/cloudinit/distros/openEuler.py @@ -4,7 +4,9 @@ from cloudinit.distros import rhel class Distro(rhel.Distro): - pass + def __init__(self, name, cfg, paths): + super(Distro, self).__init__(name, cfg, paths) + self.osfamily = "openEuler" # vi: ts=4 expandtab -- cgit v1.2.1 From a177e0764e847707e09223e521b2b719795de593 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Mon, 12 Dec 2022 08:46:16 -0700 Subject: Networking Clarification (#1892) net: clarify network function name and docstring --- cloudinit/stages.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 56e9774a..58b53d96 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -801,11 +801,10 @@ class 
Init: # Run the handlers self._do_handlers(user_data_msg, c_handlers_list, frequency) - def _remove_top_level_network_key(self, cfg): - """If network-config contains top level 'network' key, then remove it. - - Some providers of network configuration skip the top-level network - key, so ensure both methods works. + def _get_network_key_contents(self, cfg) -> dict: + """ + Network configuration can be passed as a dict under a "network" key, or + optionally at the top level. In both cases, return the config. """ if cfg and "network" in cfg: return cfg["network"] @@ -848,9 +847,7 @@ class Init: cfg_source, ) continue - ncfg = self._remove_top_level_network_key( - available_cfgs[cfg_source] - ) + ncfg = self._get_network_key_contents(available_cfgs[cfg_source]) if net.is_disabled_cfg(ncfg): LOG.debug("network config disabled by %s", cfg_source) return (None, cfg_source) -- cgit v1.2.1 From a71e53199736589b84e49ea077b00d3bb1ba0ee7 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 12 Dec 2022 15:22:00 -0700 Subject: azure: fix support for systems without az command installed (#1908) Bump commit to 89804926 --- integration-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-requirements.txt b/integration-requirements.txt index 7e0465ac..8db89d2a 100644 --- a/integration-requirements.txt +++ b/integration-requirements.txt @@ -1,5 +1,5 @@ # PyPI requirements for cloud-init integration testing # https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html # -pycloudlib @ git+https://github.com/canonical/pycloudlib.git@c9db5bfcf70ef61edff530f796f1e859eef90cf1 +pycloudlib @ git+https://github.com/canonical/pycloudlib.git@898049262cc0b71a142f13f623d3df679a1ec5c9 pytest -- cgit v1.2.1 From 9e6f7ed6ed2b5dd25a51db057bc268ee7ae626bc Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 12 Dec 2022 16:30:31 -0700 Subject: netplan: define features.NETPLAN_CONFIG_ROOT_READ_ONLY flag To make retaining original behavior in stable 
downstreams easier, provide a feature flag NETPLAN_CONFIG_ROOT_READ_ONLY so /etc/netplan/50-cloud-init.yaml config can remain unchanged as world-readable. Set this flag False to ensure world-readable 50-cloud-init.yaml. Add tests.integration_tests.util.get_feature_flag to extract feature values from cloudinit.features on test system. Co-authored-by: James Falcon --- cloudinit/features.py | 10 ++++++++ cloudinit/net/netplan.py | 4 ++- .../datasources/test_lxd_hotplug.py | 18 +++++++++++--- tests/integration_tests/modules/test_combined.py | 14 ++++++++--- tests/integration_tests/util.py | 10 ++++++++ tests/unittests/distros/test_netconfig.py | 29 ++++++++++++++++++++-- 6 files changed, 76 insertions(+), 9 deletions(-) diff --git a/cloudinit/features.py b/cloudinit/features.py index ac586f6b..62b22e3a 100644 --- a/cloudinit/features.py +++ b/cloudinit/features.py @@ -59,6 +59,16 @@ only non-hashed passwords were expired. (This flag can be removed after Jammy is no longer supported.) """ +NETPLAN_CONFIG_ROOT_READ_ONLY = True +""" +If ``NETPLAN_CONFIG_ROOT_READ_ONLY`` is True, then netplan configuration will +be written as a single root read-only file /etc/netplan/50-cloud-init.yaml. +This prevents wifi passwords in network v2 configuration from being +world-readable. Prior to 23.1, netplan configuration is world-readable. + +(This flag can be removed after Jammy is no longer supported.)
+""" + try: # pylint: disable=wildcard-import from cloudinit.feature_overrides import * # noqa diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 9612bcd5..67b139dd 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -5,6 +5,7 @@ import os import textwrap from typing import Optional, cast +from cloudinit import features from cloudinit import log as logging from cloudinit import safeyaml, subp, util from cloudinit.net import ( @@ -261,7 +262,8 @@ class Renderer(renderer.Renderer): if not header.endswith("\n"): header += "\n" - util.write_file(fpnplan, header + content, mode=0o600) + mode = 0o600 if features.NETPLAN_CONFIG_ROOT_READ_ONLY else 0o644 + util.write_file(fpnplan, header + content, mode=mode) if self.clean_default: _clean_default(target=target) diff --git a/tests/integration_tests/datasources/test_lxd_hotplug.py b/tests/integration_tests/datasources/test_lxd_hotplug.py index 438f829b..81cff252 100644 --- a/tests/integration_tests/datasources/test_lxd_hotplug.py +++ b/tests/integration_tests/datasources/test_lxd_hotplug.py @@ -4,10 +4,14 @@ import pytest from cloudinit import safeyaml from cloudinit.subp import subp +from cloudinit.util import is_true from tests.integration_tests.clouds import ImageSpecification from tests.integration_tests.decorators import retry from tests.integration_tests.instances import IntegrationInstance -from tests.integration_tests.util import lxd_has_nocloud +from tests.integration_tests.util import ( + get_feature_flag_value, + lxd_has_nocloud, +) USER_DATA = """\ #cloud-config @@ -116,6 +120,7 @@ class TestLxdHotplug: top_key = "user" else: top_key = "cloud-init" + assert subp( [ "lxc", @@ -148,10 +153,17 @@ class TestLxdHotplug: assert post_netplan == expected_netplan, client.read_from_file( "/var/log/cloud-init.log" ) - netplan_perms = client.execute( + file_perms = class_client.execute( "stat -c %a /etc/netplan/50-cloud-init.yaml" ) - assert "600" == netplan_perms.stdout.strip() + 
assert file_perms.ok, "Unable to check perms on 50-cloud-init.yaml" + feature_netplan_root_only = is_true( + get_feature_flag_value( + class_client, "NETPLAN_CONFIG_ROOT_READ_ONLY" + ) + ) + config_perms = "600" if feature_netplan_root_only else "644" + assert config_perms == file_perms.stdout.strip() ip_info = json.loads(client.execute("ip --json address")) eth2s = [i for i in ip_info if i["ifname"] == "eth2"] assert len(eth2s) == 1 diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py index 845c7414..3c1013eb 100644 --- a/tests/integration_tests/modules/test_combined.py +++ b/tests/integration_tests/modules/test_combined.py @@ -15,10 +15,12 @@ from pathlib import Path import pytest import cloudinit.config +from cloudinit.util import is_true from tests.integration_tests.clouds import ImageSpecification from tests.integration_tests.decorators import retry from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.util import ( + get_feature_flag_value, get_inactive_modules, lxd_has_nocloud, verify_clean_log, @@ -82,11 +84,17 @@ class TestCombined: """ Test that netplan config file is generated with proper permissions """ - response = class_client.execute( + file_perms = class_client.execute( "stat -c %a /etc/netplan/50-cloud-init.yaml" ) - assert response.ok, "Unable to check perms on 50-cloud-init.yaml" - assert "600" == response.stdout.strip() + assert file_perms.ok, "Unable to check perms on 50-cloud-init.yaml" + feature_netplan_root_only = is_true( + get_feature_flag_value( + class_client, "NETPLAN_CONFIG_ROOT_READ_ONLY" + ) + ) + config_perms = "600" if feature_netplan_root_only else "644" + assert config_perms == file_perms.stdout.strip() def test_final_message(self, class_client: IntegrationInstance): """Test that final_message module works as expected. 
diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py index 69214e9f..1c2a9284 100644 --- a/tests/integration_tests/util.py +++ b/tests/integration_tests/util.py @@ -172,3 +172,13 @@ def lxd_has_nocloud(client: IntegrationInstance) -> bool: ["lxc", "config", "metadata", "show", client.instance.name] ) return "/var/lib/cloud/seed/nocloud" in lxd_image_metadata.stdout + + +def get_feature_flag_value(client: IntegrationInstance, key): + value = client.execute( + 'python3 -c "from cloudinit import features; ' + f'print(features.{key})"' + ).strip() + if "NameError" in value: + raise NameError(f"name '{key}' is not defined") + return value diff --git a/tests/unittests/distros/test_netconfig.py b/tests/unittests/distros/test_netconfig.py index 8306bacd..0b79cdc4 100644 --- a/tests/unittests/distros/test_netconfig.py +++ b/tests/unittests/distros/test_netconfig.py @@ -7,7 +7,15 @@ from io import StringIO from textwrap import dedent from unittest import mock -from cloudinit import distros, helpers, safeyaml, settings, subp, util +from cloudinit import ( + distros, + features, + helpers, + safeyaml, + settings, + subp, + util, +) from cloudinit.distros.parsers.sys_conf import SysConf from cloudinit.net.activators import IfUpDownActivator from tests.unittests.helpers import ( @@ -562,6 +570,8 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): apply_fn(config, bringup) results = dir2dict(tmpd) + + mode = 0o600 if features.NETPLAN_CONFIG_ROOT_READ_ONLY else 0o644 for cfgpath, expected in expected_cfgs.items(): print("----------") print(expected) @@ -569,7 +579,7 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): print(results[cfgpath]) print("----------") self.assertEqual(expected, results[cfgpath]) - self.assertEqual(0o600, get_mode(cfgpath, tmpd)) + self.assertEqual(mode, get_mode(cfgpath, tmpd)) def netplan_path(self): return "/etc/netplan/50-cloud-init.yaml" @@ -609,6 +619,21 @@ class 
TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): expected_cfgs=expected_cfgs.copy(), ) + def test_apply_network_config_v2_passthrough_ub_old_behavior(self): + """Kinetic and earlier have 50-cloud-init.yaml world-readable""" + expected_cfgs = { + self.netplan_path(): V2_TO_V2_NET_CFG_OUTPUT, + } + # ub_distro.apply_network_config(V2_NET_CFG, False) + with mock.patch.object( + features, "NETPLAN_CONFIG_ROOT_READ_ONLY", False + ): + self._apply_and_verify_netplan( + self.distro.apply_network_config, + V2_NET_CFG, + expected_cfgs=expected_cfgs.copy(), + ) + def test_apply_network_config_v2_full_passthrough_ub(self): expected_cfgs = { self.netplan_path(): V2_PASSTHROUGH_NET_CFG_OUTPUT, -- cgit v1.2.1 From 30e5f0049fa6d078539e8354acbf42aaa7d1ed8e Mon Sep 17 00:00:00 2001 From: s-makin Date: Tue, 13 Dec 2022 04:17:07 +0000 Subject: docs: add copy button to code blocks (#1890) Use the sphinx-copybutton extension. Separate commands from output for ease of use. --- doc-requirements.txt | 3 +- doc/rtd/conf.py | 7 + doc/rtd/topics/analyze.rst | 439 +++++++++++++++------------- doc/rtd/topics/bugs.rst | 17 +- doc/rtd/topics/cli.rst | 173 +++++++---- doc/rtd/topics/configuration.rst | 2 +- doc/rtd/topics/datasources/aliyun.rst | 81 ++--- doc/rtd/topics/datasources/altcloud.rst | 26 +- doc/rtd/topics/datasources/azure.rst | 62 ++-- doc/rtd/topics/datasources/cloudstack.rst | 12 +- doc/rtd/topics/datasources/ec2.rst | 16 +- doc/rtd/topics/datasources/exoscale.rst | 18 +- doc/rtd/topics/datasources/gce.rst | 10 +- doc/rtd/topics/datasources/lxd.rst | 18 +- doc/rtd/topics/datasources/nocloud.rst | 50 ++-- doc/rtd/topics/datasources/openstack.rst | 20 +- doc/rtd/topics/datasources/oracle.rst | 8 +- doc/rtd/topics/datasources/smartos.rst | 56 ++-- doc/rtd/topics/debugging.rst | 87 ++++-- doc/rtd/topics/faq.rst | 13 +- doc/rtd/topics/format.rst | 29 +- doc/rtd/topics/network-config-format-v2.rst | 2 +- doc/rtd/topics/network-config.rst | 67 ++--- doc/rtd/topics/tutorials/lxd.rst | 
30 +- 24 files changed, 715 insertions(+), 531 deletions(-) diff --git a/doc-requirements.txt b/doc-requirements.txt index 359da21d..efe93fad 100644 --- a/doc-requirements.txt +++ b/doc-requirements.txt @@ -3,4 +3,5 @@ furo m2r2 pyyaml sphinx -sphinx_design +sphinx-design +sphinx-copybutton diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py index a549a444..fe3163f9 100644 --- a/doc/rtd/conf.py +++ b/doc/rtd/conf.py @@ -29,6 +29,7 @@ needs_sphinx = "4.0" # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ "m2r2", + "sphinx_copybutton", "sphinx_design", "sphinx.ext.autodoc", "sphinx.ext.autosectionlabel", @@ -58,6 +59,12 @@ exclude_patterns = [] # output. They are ignored by default. show_authors = False +# Sphinx-copybutton config options: 1) prompt to be stripped from copied code. +# 2) Set to copy all lines (not just prompt lines) to ensure multiline snippets +# can be copied even if they don't contain an EOF line. +copybutton_prompt_text = "$ " +copybutton_only_copy_prompt_lines = False + # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for diff --git a/doc/rtd/topics/analyze.rst b/doc/rtd/topics/analyze.rst index afc3a5ef..33ab911b 100644 --- a/doc/rtd/topics/analyze.rst +++ b/doc/rtd/topics/analyze.rst @@ -19,10 +19,10 @@ The analyze command requires one of the four subcommands: .. code-block:: shell-session - $ cloud-init analyze blame - $ cloud-init analyze show - $ cloud-init analyze dump - $ cloud-init analyze boot + $ cloud-init analyze blame + $ cloud-init analyze show + $ cloud-init analyze dump + $ cloud-init analyze boot Availability ============ @@ -43,117 +43,128 @@ execution. .. 
code-block:: shell-session - $ cloud-init analyze blame - -- Boot Record 01 -- - 00.80300s (init-network/config-growpart) - 00.64300s (init-network/config-resizefs) - 00.62100s (init-network/config-ssh) - 00.57300s (modules-config/config-grub-dpkg) - 00.40300s (init-local/search-NoCloud) - 00.38200s (init-network/config-users-groups) - 00.19800s (modules-config/config-apt-configure) - 00.03700s (modules-final/config-keys-to-console) - 00.02100s (init-network/config-update_etc_hosts) - 00.02100s (init-network/check-cache) - 00.00800s (modules-final/config-ssh-authkey-fingerprints) - 00.00800s (init-network/consume-vendor-data) - 00.00600s (modules-config/config-timezone) - 00.00500s (modules-final/config-final-message) - 00.00400s (init-network/consume-user-data) - 00.00400s (init-network/config-mounts) - 00.00400s (init-network/config-disk_setup) - 00.00400s (init-network/config-bootcmd) - 00.00400s (init-network/activate-datasource) - 00.00300s (init-network/config-update_hostname) - 00.00300s (init-network/config-set_hostname) - 00.00200s (modules-final/config-snappy) - 00.00200s (init-network/config-rsyslog) - 00.00200s (init-network/config-ca-certs) - 00.00200s (init-local/check-cache) - 00.00100s (modules-final/config-scripts-vendor) - 00.00100s (modules-final/config-scripts-per-once) - 00.00100s (modules-final/config-salt-minion) - 00.00100s (modules-final/config-rightscale_userdata) - 00.00100s (modules-final/config-phone-home) - 00.00100s (modules-final/config-package-update-upgrade-install) - 00.00100s (modules-final/config-fan) - 00.00100s (modules-config/config-ubuntu-advantage) - 00.00100s (modules-config/config-ssh-import-id) - 00.00100s (modules-config/config-snap) - 00.00100s (modules-config/config-set-passwords) - 00.00100s (modules-config/config-runcmd) - 00.00100s (modules-config/config-locale) - 00.00100s (modules-config/config-byobu) - 00.00100s (modules-config/config-apt-pipelining) - 00.00100s (init-network/config-write-files) - 00.00100s 
(init-network/config-seed_random) - 00.00100s (init-network/config-migrator) - 00.00000s (modules-final/config-ubuntu-drivers) - 00.00000s (modules-final/config-scripts-user) - 00.00000s (modules-final/config-scripts-per-instance) - 00.00000s (modules-final/config-scripts-per-boot) - 00.00000s (modules-final/config-puppet) - 00.00000s (modules-final/config-power-state-change) - 00.00000s (modules-final/config-mcollective) - 00.00000s (modules-final/config-lxd) - 00.00000s (modules-final/config-landscape) - 00.00000s (modules-final/config-chef) - 00.00000s (modules-config/config-snap_config) - 00.00000s (modules-config/config-ntp) - 00.00000s (modules-config/config-disable-ec2-metadata) - 00.00000s (init-network/setup-datasource) - - 1 boot records analyzed + $ cloud-init analyze blame + +Example output: + +.. code-block:: + + -- Boot Record 01 -- + 00.80300s (init-network/config-growpart) + 00.64300s (init-network/config-resizefs) + 00.62100s (init-network/config-ssh) + 00.57300s (modules-config/config-grub-dpkg) + 00.40300s (init-local/search-NoCloud) + 00.38200s (init-network/config-users-groups) + 00.19800s (modules-config/config-apt-configure) + 00.03700s (modules-final/config-keys-to-console) + 00.02100s (init-network/config-update_etc_hosts) + 00.02100s (init-network/check-cache) + 00.00800s (modules-final/config-ssh-authkey-fingerprints) + 00.00800s (init-network/consume-vendor-data) + 00.00600s (modules-config/config-timezone) + 00.00500s (modules-final/config-final-message) + 00.00400s (init-network/consume-user-data) + 00.00400s (init-network/config-mounts) + 00.00400s (init-network/config-disk_setup) + 00.00400s (init-network/config-bootcmd) + 00.00400s (init-network/activate-datasource) + 00.00300s (init-network/config-update_hostname) + 00.00300s (init-network/config-set_hostname) + 00.00200s (modules-final/config-snappy) + 00.00200s (init-network/config-rsyslog) + 00.00200s (init-network/config-ca-certs) + 00.00200s (init-local/check-cache) + 
00.00100s (modules-final/config-scripts-vendor) + 00.00100s (modules-final/config-scripts-per-once) + 00.00100s (modules-final/config-salt-minion) + 00.00100s (modules-final/config-rightscale_userdata) + 00.00100s (modules-final/config-phone-home) + 00.00100s (modules-final/config-package-update-upgrade-install) + 00.00100s (modules-final/config-fan) + 00.00100s (modules-config/config-ubuntu-advantage) + 00.00100s (modules-config/config-ssh-import-id) + 00.00100s (modules-config/config-snap) + 00.00100s (modules-config/config-set-passwords) + 00.00100s (modules-config/config-runcmd) + 00.00100s (modules-config/config-locale) + 00.00100s (modules-config/config-byobu) + 00.00100s (modules-config/config-apt-pipelining) + 00.00100s (init-network/config-write-files) + 00.00100s (init-network/config-seed_random) + 00.00100s (init-network/config-migrator) + 00.00000s (modules-final/config-ubuntu-drivers) + 00.00000s (modules-final/config-scripts-user) + 00.00000s (modules-final/config-scripts-per-instance) + 00.00000s (modules-final/config-scripts-per-boot) + 00.00000s (modules-final/config-puppet) + 00.00000s (modules-final/config-power-state-change) + 00.00000s (modules-final/config-mcollective) + 00.00000s (modules-final/config-lxd) + 00.00000s (modules-final/config-landscape) + 00.00000s (modules-final/config-chef) + 00.00000s (modules-config/config-snap_config) + 00.00000s (modules-config/config-ntp) + 00.00000s (modules-config/config-disable-ec2-metadata) + 00.00000s (init-network/setup-datasource) + + 1 boot records analyzed Show ---- The ``show`` action is similar to ``systemd-analyze critical-chain`` which prints a list of units, the time they started and how long they took. -Cloud-init has four :ref:`boot stages`, and within each stage a number of modules may -run depending on configuration. ``cloudinit-analyze show`` will, for each boot, -print this information and a summary of the total time. 
+Cloud-init has four :ref:`boot stages`, and within each stage a +number of modules may run depending on configuration. +``cloudinit-analyze show`` will, for each boot, print this information and a +summary of the total time. The following is an abbreviated example of the show output: .. code-block:: shell-session - $ cloud-init analyze show - -- Boot Record 01 -- - The total time elapsed since completing an event is printed after the "@" character. - The time the event takes is printed after the "+" character. - - Starting stage: init-local - |``->no cache found @00.01700s +00.00200s - |`->found local data from DataSourceNoCloud @00.11000s +00.40300s - Finished stage: (init-local) 00.94200 seconds - - Starting stage: init-network - |`->restored from cache with run check: DataSourceNoCloud [seed=/dev/sr0][dsmode=net] @04.79500s +00.02100s - |`->setting up datasource @04.88900s +00.00000s - |`->reading and applying user-data @04.90100s +00.00400s - |`->reading and applying vendor-data @04.90500s +00.00800s - |`->activating datasource @04.95200s +00.00400s - Finished stage: (init-network) 02.72100 seconds - - Starting stage: modules-config - |`->config-snap ran successfully @15.43100s +00.00100s - ... - |`->config-runcmd ran successfully @16.22300s +00.00100s - |`->config-byobu ran successfully @16.23400s +00.00100s - Finished stage: (modules-config) 00.83500 seconds - - Starting stage: modules-final - |`->config-snappy ran successfully @16.87400s +00.00200s - |`->config-package-update-upgrade-install ran successfully @16.87600s +00.00100s - ... - |`->config-final-message ran successfully @16.93700s +00.00500s - |`->config-power-state-change ran successfully @16.94300s +00.00000s - Finished stage: (modules-final) 00.10300 seconds - - Total Time: 4.60100 seconds - - 1 boot records analyzed + $ cloud-init analyze show + +Example output: + +.. 
code-block:: shell-session + + -- Boot Record 01 -- + The total time elapsed since completing an event is printed after the "@" character. + The time the event takes is printed after the "+" character. + + Starting stage: init-local + |``->no cache found @00.01700s +00.00200s + |`->found local data from DataSourceNoCloud @00.11000s +00.40300s + Finished stage: (init-local) 00.94200 seconds + + Starting stage: init-network + |`->restored from cache with run check: DataSourceNoCloud [seed=/dev/sr0][dsmode=net] @04.79500s +00.02100s + |`->setting up datasource @04.88900s +00.00000s + |`->reading and applying user-data @04.90100s +00.00400s + |`->reading and applying vendor-data @04.90500s +00.00800s + |`->activating datasource @04.95200s +00.00400s + Finished stage: (init-network) 02.72100 seconds + + Starting stage: modules-config + |`->config-snap ran successfully @15.43100s +00.00100s + ... + |`->config-runcmd ran successfully @16.22300s +00.00100s + |`->config-byobu ran successfully @16.23400s +00.00100s + Finished stage: (modules-config) 00.83500 seconds + + Starting stage: modules-final + |`->config-snappy ran successfully @16.87400s +00.00200s + |`->config-package-update-upgrade-install ran successfully @16.87600s +00.00100s + ... + |`->config-final-message ran successfully @16.93700s +00.00500s + |`->config-power-state-change ran successfully @16.94300s +00.00000s + Finished stage: (modules-final) 00.10300 seconds + + Total Time: 4.60100 seconds + + 1 boot records analyzed If additional boot records are detected then they are printed out from oldest to newest. @@ -167,85 +178,90 @@ consumed for other reporting needs. Each element in the list is a boot entry. .. 
code-block:: shell-session - $ cloud-init analyze dump - [ - { - "description": "starting search for local datasources", - "event_type": "start", - "name": "init-local", - "origin": "cloudinit", - "timestamp": 1567057578.037 - }, - { - "description": "attempting to read from cache [check]", - "event_type": "start", - "name": "init-local/check-cache", - "origin": "cloudinit", - "timestamp": 1567057578.054 - }, - { - "description": "no cache found", - "event_type": "finish", - "name": "init-local/check-cache", - "origin": "cloudinit", - "result": "SUCCESS", - "timestamp": 1567057578.056 - }, - { - "description": "searching for local data from DataSourceNoCloud", - "event_type": "start", - "name": "init-local/search-NoCloud", - "origin": "cloudinit", - "timestamp": 1567057578.147 - }, - { - "description": "found local data from DataSourceNoCloud", - "event_type": "finish", - "name": "init-local/search-NoCloud", - "origin": "cloudinit", - "result": "SUCCESS", - "timestamp": 1567057578.55 - }, - { - "description": "searching for local datasources", - "event_type": "finish", - "name": "init-local", - "origin": "cloudinit", - "result": "SUCCESS", - "timestamp": 1567057578.979 - }, - { - "description": "searching for network datasources", - "event_type": "start", - "name": "init-network", - "origin": "cloudinit", - "timestamp": 1567057582.814 - }, - { - "description": "attempting to read from cache [trust]", - "event_type": "start", - "name": "init-network/check-cache", - "origin": "cloudinit", - "timestamp": 1567057582.832 - }, - ... 
- { - "description": "config-power-state-change ran successfully", - "event_type": "finish", - "name": "modules-final/config-power-state-change", - "origin": "cloudinit", - "result": "SUCCESS", - "timestamp": 1567057594.98 - }, - { - "description": "running modules for final", - "event_type": "finish", - "name": "modules-final", - "origin": "cloudinit", - "result": "SUCCESS", - "timestamp": 1567057594.982 - } - ] + $ cloud-init analyze dump + +Example output: + +.. code-block:: + + [ + { + "description": "starting search for local datasources", + "event_type": "start", + "name": "init-local", + "origin": "cloudinit", + "timestamp": 1567057578.037 + }, + { + "description": "attempting to read from cache [check]", + "event_type": "start", + "name": "init-local/check-cache", + "origin": "cloudinit", + "timestamp": 1567057578.054 + }, + { + "description": "no cache found", + "event_type": "finish", + "name": "init-local/check-cache", + "origin": "cloudinit", + "result": "SUCCESS", + "timestamp": 1567057578.056 + }, + { + "description": "searching for local data from DataSourceNoCloud", + "event_type": "start", + "name": "init-local/search-NoCloud", + "origin": "cloudinit", + "timestamp": 1567057578.147 + }, + { + "description": "found local data from DataSourceNoCloud", + "event_type": "finish", + "name": "init-local/search-NoCloud", + "origin": "cloudinit", + "result": "SUCCESS", + "timestamp": 1567057578.55 + }, + { + "description": "searching for local datasources", + "event_type": "finish", + "name": "init-local", + "origin": "cloudinit", + "result": "SUCCESS", + "timestamp": 1567057578.979 + }, + { + "description": "searching for network datasources", + "event_type": "start", + "name": "init-network", + "origin": "cloudinit", + "timestamp": 1567057582.814 + }, + { + "description": "attempting to read from cache [trust]", + "event_type": "start", + "name": "init-network/check-cache", + "origin": "cloudinit", + "timestamp": 1567057582.832 + }, + ... 
+ { + "description": "config-power-state-change ran successfully", + "event_type": "finish", + "name": "modules-final/config-power-state-change", + "origin": "cloudinit", + "result": "SUCCESS", + "timestamp": 1567057594.98 + }, + { + "description": "running modules for final", + "event_type": "finish", + "name": "modules-final", + "origin": "cloudinit", + "result": "SUCCESS", + "timestamp": 1567057594.982 + } + ] Boot @@ -265,15 +281,20 @@ to cloud-init startup, and tracking regression. .. code-block:: shell-session - $ cloud-init analyze boot - -- Most Recent Boot Record -- - Kernel Started at: 2019-08-29 01:35:37.753790 - Kernel ended boot at: 2019-08-29 01:35:38.807407 - Kernel time to boot (seconds): 1.053617000579834 - Cloud-init activated by systemd at: 2019-08-29 01:35:43.992460 - Time between Kernel end boot and Cloud-init activation (seconds): 5.185053110122681 - Cloud-init start: 2019-08-29 08:35:45.867000 - successful + $ cloud-init analyze boot + +Example output: + +.. code-block:: + + -- Most Recent Boot Record -- + Kernel Started at: 2019-08-29 01:35:37.753790 + Kernel ended boot at: 2019-08-29 01:35:38.807407 + Kernel time to boot (seconds): 1.053617000579834 + Cloud-init activated by systemd at: 2019-08-29 01:35:43.992460 + Time between Kernel end boot and Cloud-init activation (seconds): 5.185053110122681 + Cloud-init start: 2019-08-29 08:35:45.867000 + successful Timestamp Gathering ^^^^^^^^^^^^^^^^^^^ @@ -288,20 +309,36 @@ analyze boot runs: cloud-init local systemd unit In order to gather the necessary timestamps using systemd, running the -commands below will gather the UserspaceTimestamp and InactiveExitTimestamp: +following command will gather the UserspaceTimestamp: .. 
code-block:: shell-session - $ systemctl show -p UserspaceTimestampMonotonic - UserspaceTimestampMonotonic=989279 - $ systemctl show cloud-init-local -p InactiveExitTimestampMonotonic - InactiveExitTimestampMonotonic=4493126 + $ systemctl show -p UserspaceTimestampMonotonic + +Example output: + +.. code-block:: + + UserspaceTimestampMonotonic=989279 The UserspaceTimestamp tracks when the init system starts, which is used as -an indicator of kernel finishing initialization. The InactiveExitTimestamp -tracks when a particular systemd unit transitions from the Inactive to Active -state, which can be used to mark the beginning of systemd's activation of -cloud-init. +an indicator of kernel finishing initialization. + +Running the following command will gather the InactiveExitTimestamp: + +.. code-block:: shell-session + + $ systemctl show cloud-init-local -p InactiveExitTimestampMonotonic + +Example output: + +.. code-block:: + + InactiveExitTimestampMonotonic=4493126 + +The InactiveExitTimestamp tracks when a particular systemd unit transitions +from the Inactive to Active state, which can be used to mark the beginning +of systemd's activation of cloud-init. Currently this only works for distros that use systemd as the init process. We will be expanding support for other distros in the future and this document diff --git a/doc/rtd/topics/bugs.rst b/doc/rtd/topics/bugs.rst index c66048e2..d7e79740 100644 --- a/doc/rtd/topics/bugs.rst +++ b/doc/rtd/topics/bugs.rst @@ -17,17 +17,22 @@ To aid in debugging, please collect the necessary logs. To do so, run the .. code-block:: shell-session - $ sudo cloud-init collect-logs - Wrote /home/ubuntu/cloud-init.tar.gz + $ sudo cloud-init collect-logs + +Example output: + +.. code-block:: + + Wrote /home/ubuntu/cloud-init.tar.gz If your version of cloud-init does not have the `collect-logs` subcommand, then please manually collect the base log files by doing the following: .. 
code-block:: shell-session - $ sudo dmesg > dmesg.txt - $ sudo journalctl -o short-precise > journal.txt - $ sudo tar -cvf cloud-init.tar dmesg.txt journal.txt /run/cloud-init \ + $ sudo dmesg > dmesg.txt + $ sudo journalctl -o short-precise > journal.txt + $ sudo tar -cvf cloud-init.tar dmesg.txt journal.txt /run/cloud-init \ /var/log/cloud-init.log /var/log/cloud-init-output.log Report Upstream Bug @@ -63,7 +68,7 @@ Launchpad: .. code-block:: shell-session - $ ubuntu-bug cloud-init + $ ubuntu-bug cloud-init If that does not work or is not an option, please collect the logs using the commands in the above Collect Logs section and then report the bug on the diff --git a/doc/rtd/topics/cli.rst b/doc/rtd/topics/cli.rst index bd7cac62..463ac38c 100644 --- a/doc/rtd/topics/cli.rst +++ b/doc/rtd/topics/cli.rst @@ -8,8 +8,13 @@ option. This can be used against cloud-init itself or any of its subcommands. .. code-block:: shell-session - $ cloud-init --help - usage: cloud-init [-h] [--version] [--file FILES] [--debug] [--force] + $ cloud-init --help + +Example output: + +.. code-block:: + + usage: cloud-init [-h] [--version] [--file FILES] [--debug] [--force] {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status,schema} ... options: @@ -133,9 +138,14 @@ this document. .. code-block:: shell-session - $ cloud-init features - NETWORK_CONFIG_V1 - NETWORK_CONFIG_V2 + $ cloud-init features + +Example output: + +.. code-block:: + + NETWORK_CONFIG_V1 + NETWORK_CONFIG_V2 .. _cli_init: @@ -199,6 +209,11 @@ aliases: .. code-block:: shell-session $ cloud-init query --list-keys + +Example output: + +.. code-block:: + _beta_keys availability_zone base64_encoded_keys @@ -215,35 +230,57 @@ aliases: v1 vendordata -Below demonstrates how to query standardized metadata from clouds: +Here are a few examples of how to query standardized metadata from clouds: + +.. code-block:: shell-session + + $ cloud-init query v1.cloud_name + +Example output: + +.. 
code-block:: + + aws # or openstack, azure, gce etc. + +Any standardized instance-data under a key is aliased as a top-level key +for convenience: .. code-block:: shell-session - % cloud-init query v1.cloud_name - aws # or openstack, azure, gce etc. + $ cloud-init query cloud_name + +Example output: + +.. code-block:: + + aws # or openstack, azure, gce etc. + +One can also query datasource-specific metadata on EC2, e.g.: + +.. code-block:: shell-session - # Any standardized instance-data under a key is aliased as a top-level key for convenience. - % cloud-init query cloud_name - aws # or openstack, azure, gce etc. + $ cloud-init query ds.meta_data.public_ipv4 - # Query datasource-specific metadata on EC2 - % cloud-init query ds.meta_data.public_ipv4 .. note:: - The standardized instance data keys under **v#** are guaranteed not to change - behavior or format. If using top-level convenience aliases for any - standardized instance data keys, the most value (highest **v#**) of that key - name is what is reported as the top-level value. So these aliases act as a - 'latest'. + The standardized instance data keys under **v#** are guaranteed not to change + behavior or format. If using top-level convenience aliases for any + standardized instance data keys, the most value (highest **v#**) of that key + name is what is reported as the top-level value. So these aliases act as a + 'latest'. -This data can then be formatted to generate custom strings or data: +This data can then be formatted to generate custom strings or data. For +example, we can generate a custom hostname fqdn based on instance-id, cloud and +region: .. code-block:: shell-session - # Generate a custom hostname fqdn based on instance-id, cloud and region - % cloud-init query --format 'custom-{{instance_id}}.{{region}}.{{v1.cloud_name}}.com' - custom-i-0e91f69987f37ec74.us-east-2.aws.com + $ cloud-init query --format 'custom-{{instance_id}}.{{region}}.{{v1.cloud_name}}.com' + +.. 
code-block:: + + custom-i-0e91f69987f37ec74.us-east-2.aws.com .. _cli_schema: @@ -266,7 +303,7 @@ errors on stdout. .. code-block:: shell-session - $ cloud-init schema -c ./config.yml --annotate + $ cloud-init schema -c ./config.yml --annotate .. _cli_single: @@ -286,12 +323,12 @@ default frequency of once-per-instance: .. code-block:: shell-session - $ cloud-init single --name set_hostname --frequency always + $ cloud-init single --name set_hostname --frequency always .. note:: - Mileage may vary trying to re-run each cloud-config module, as - some are not idempotent. + Mileage may vary trying to re-run each cloud-config module, as + some are not idempotent. .. _cli_status: @@ -307,38 +344,62 @@ non-zero if an error is detected in cloud-init. * ``--format [yaml|json|tabular]``: machine-readable JSON or YAML detailed output -Below are examples of output when cloud-init is running, showing status and -the currently running modules, as well as when it is done. +The ``status`` command can be used simply as follows: + +.. code-block:: shell-session + + $ cloud-init status + +Which shows whether cloud-init is currently running, done, disabled, or in +error, as in this example output: + +.. code-block:: + + status: running + +The ``--long`` option, shown below, provides a more verbose output. + +.. code-block:: shell-session + + $ cloud-init status --long + +Example output when cloud-init is running: + +.. code-block:: + + status: running + time: Fri, 26 Jan 2018 21:39:43 +0000 + detail: + Running in stage: init-local + +Example output when cloud-init is done: + +.. code-block:: + + status: done + boot_status_code: enabled-by-generator + last_update: Tue, 16 Aug 2022 19:12:58 +0000 + detail: + DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net] + +The detailed output can be shown in machine-readable JSON or YAML with the +``format`` option, for example: .. 
code-block:: shell-session - $ cloud-init status - status: running - - $ cloud-init status --long - status: running - time: Fri, 26 Jan 2018 21:39:43 +0000 - detail: - Running in stage: init-local - - $ cloud-init status - status: done - - $ cloud-init status --long - status: done - boot_status_code: enabled-by-generator - last_update: Tue, 16 Aug 2022 19:12:58 +0000 - detail: - DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net] - - $ cloud-init status --format=json - { - "boot_status_code": "enabled-by-generator", - "datasource": "nocloud", - "detail": "DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net]", - "errors": [], - "last_update": "Tue, 16 Aug 2022 19:12:58 +0000", - "status": "done" - } + $ cloud-init status --format=json + +Which would produce the following example output: + +.. code-block:: + + { + "boot_status_code": "enabled-by-generator", + "datasource": "nocloud", + "detail": "DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net]", + "errors": [], + "last_update": "Tue, 16 Aug 2022 19:12:58 +0000", + "status": "done" + } .. _More details on machine-id: https://www.freedesktop.org/software/systemd/man/machine-id.html diff --git a/doc/rtd/topics/configuration.rst b/doc/rtd/topics/configuration.rst index 14716f29..19d053c5 100644 --- a/doc/rtd/topics/configuration.rst +++ b/doc/rtd/topics/configuration.rst @@ -41,7 +41,7 @@ These get fetched from the datasource and are defined at instance launch. Network Configuration ===================== Network configuration happens independently from other cloud-init -configuration. See :ref:`network configuration documentation` +configuration. See :ref:`network configuration documentation` for more information. 
Specifying Configuration diff --git a/doc/rtd/topics/datasources/aliyun.rst b/doc/rtd/topics/datasources/aliyun.rst index 0bb9c19e..32fcb4bc 100644 --- a/doc/rtd/topics/datasources/aliyun.rst +++ b/doc/rtd/topics/datasources/aliyun.rst @@ -19,13 +19,13 @@ configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``). An example configuration with the default values is provided below: -.. sourcecode:: yaml +.. code-block:: yaml - datasource: - AliYun: - metadata_urls: ["http://100.100.100.200"] - timeout: 50 - max_wait: 120 + datasource: + AliYun: + metadata_urls: ["http://100.100.100.200"] + timeout: 50 + max_wait: 120 Versions ^^^^^^^^ @@ -42,37 +42,47 @@ You can list the versions available to your instance with: .. code-block:: shell-session - $ curl http://100.100.100.200/ - 2016-01-01 - latest + $ curl http://100.100.100.200/ + +Example output: + +.. code-block:: + + 2016-01-01 + latest Metadata ^^^^^^^^ Instance metadata can be queried at -``http://100.100.100.200/2016-01-01/meta-data`` +``http://100.100.100.200/2016-01-01/meta-data``: .. code-block:: shell-session - $ curl http://100.100.100.200/2016-01-01/meta-data - dns-conf/ - eipv4 - hostname - image-id - instance-id - instance/ - mac - network-type - network/ - ntp-conf/ - owner-account-id - private-ipv4 - public-keys/ - region-id - serial-number - source-address - sub-private-ipv4-list - vpc-cidr-block - vpc-id + $ curl http://100.100.100.200/2016-01-01/meta-data + +Example output: + +.. code-block:: + + dns-conf/ + eipv4 + hostname + image-id + instance-id + instance/ + mac + network-type + network/ + ntp-conf/ + owner-account-id + private-ipv4 + public-keys/ + region-id + serial-number + source-address + sub-private-ipv4-list + vpc-cidr-block + vpc-id Userdata ^^^^^^^^ @@ -82,8 +92,13 @@ If no user-data is provided, this will return a 404. .. code-block:: shell-session - $ curl http://100.100.100.200/2016-01-01/user-data - #!/bin/sh - echo "Hello World." 
+ $ curl http://100.100.100.200/2016-01-01/user-data + +Example output: + +.. code-block:: + + #!/bin/sh + echo "Hello World." .. vi: textwidth=79 diff --git a/doc/rtd/topics/datasources/altcloud.rst b/doc/rtd/topics/datasources/altcloud.rst index acd5e2a3..b69a3093 100644 --- a/doc/rtd/topics/datasources/altcloud.rst +++ b/doc/rtd/topics/datasources/altcloud.rst @@ -20,13 +20,13 @@ The format of the Custom Properties entry must be: For example to pass a simple bash script: -.. sourcecode:: sh +.. code-block:: sh - % cat simple_script.bash + $ cat simple_script.bash #!/bin/bash echo "Hello Joe!" >> /tmp/JJV_Joe_out.txt - % base64 < simple_script.bash + $ base64 < simple_script.bash IyEvYmluL2Jhc2gKZWNobyAiSGVsbG8gSm9lISIgPj4gL3RtcC9KSlZfSm9lX291dC50eHQK To pass this example script to cloud-init running in a `RHEVm`_ v3.0 VM @@ -61,26 +61,26 @@ For example, to pass the same ``simple_script.bash`` to vSphere: Create the ISO ^^^^^^^^^^^^^^ -.. sourcecode:: sh +.. code-block:: sh - % mkdir my-iso + $ mkdir my-iso NOTE: The file name on the ISO must be: ``user-data.txt`` -.. sourcecode:: sh +.. code-block:: sh - % cp simple_script.bash my-iso/user-data.txt - % genisoimage -o user-data.iso -r my-iso + $ cp simple_script.bash my-iso/user-data.txt + $ genisoimage -o user-data.iso -r my-iso Verify the ISO ^^^^^^^^^^^^^^ -.. sourcecode:: sh +.. code-block:: sh - % sudo mkdir /media/vsphere_iso - % sudo mount -o loop user-data.iso /media/vsphere_iso - % cat /media/vsphere_iso/user-data.txt - % sudo umount /media/vsphere_iso + $ sudo mkdir /media/vsphere_iso + $ sudo mount -o loop user-data.iso /media/vsphere_iso + $ cat /media/vsphere_iso/user-data.txt + $ sudo umount /media/vsphere_iso Then, launch the `vSphere`_ VM the ISO user-data.iso attached as a CDROM. 
diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst index b73d7d38..345c2c1b 100644 --- a/doc/rtd/topics/datasources/azure.rst +++ b/doc/rtd/topics/datasources/azure.rst @@ -52,14 +52,14 @@ merged into the 'datasource: Azure' entry. An example configuration with the default values is provided below: -.. sourcecode:: yaml +.. code-block:: yaml - datasource: - Azure: - apply_network_config: true - data_dir: /var/lib/waagent - disk_aliases: - ephemeral0: /dev/disk/cloud/azure_resource + datasource: + Azure: + apply_network_config: true + data_dir: /var/lib/waagent + disk_aliases: + ephemeral0: /dev/disk/cloud/azure_resource Userdata @@ -75,30 +75,30 @@ In the example below, user-data provided is 'this is my userdata' Example: -.. sourcecode:: xml - - - 1.0 - - LinuxProvisioningConfiguration - myHost - myuser - - dGhpcyBpcyBteSB1c2VyZGF0YQ=== - eyJhZ2VudF9jb21tYW5kIjogWyJzdGFydCIsICJ3YWxpbnV4YWdlbnQiXX0= - true - - - - 6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7 - this-value-unused - - - - - +.. code-block:: xml + + + 1.0 + + LinuxProvisioningConfiguration + myHost + myuser + + dGhpcyBpcyBteSB1c2VyZGF0YQ=== + eyJhZ2VudF9jb21tYW5kIjogWyJzdGFydCIsICJ3YWxpbnV4YWdlbnQiXX0= + true + + + + 6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7 + this-value-unused + + + + + hostname -------- diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst index e889ab6e..93633308 100644 --- a/doc/rtd/topics/datasources/cloudstack.rst +++ b/doc/rtd/topics/datasources/cloudstack.rst @@ -14,7 +14,7 @@ URLs to access user-data and meta-data from the Virtual Machine. router that points to the next UserData server (which is usually also the virtual router). -.. code:: bash +.. code-block:: bash http://data-server./latest/user-data http://data-server./latest/meta-data @@ -40,12 +40,12 @@ The settings that may be configured are: An example configuration with the default values is provided below: -.. sourcecode:: yaml +.. 
code-block:: yaml - datasource: - CloudStack: - max_wait: 120 - timeout: 50 + datasource: + CloudStack: + max_wait: 120 + timeout: 50 .. _Apache CloudStack: http://cloudstack.apache.org/ diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst index d30e1bb6..bbc4ee4e 100644 --- a/doc/rtd/topics/datasources/ec2.rst +++ b/doc/rtd/topics/datasources/ec2.rst @@ -109,14 +109,14 @@ The settings that may be configured are: An example configuration with the default values is provided below: -.. sourcecode:: yaml - - datasource: - Ec2: - metadata_urls: ["http://169.254.169.254:80", "http://instance-data:8773"] - max_wait: 120 - timeout: 50 - apply_full_imds_network_config: true +.. code-block:: yaml + + datasource: + Ec2: + metadata_urls: ["http://169.254.169.254:80", "http://instance-data:8773"] + max_wait: 120 + timeout: 50 + apply_full_imds_network_config: true Notes ----- diff --git a/doc/rtd/topics/datasources/exoscale.rst b/doc/rtd/topics/datasources/exoscale.rst index 2d2e4544..e5e946de 100644 --- a/doc/rtd/topics/datasources/exoscale.rst +++ b/doc/rtd/topics/datasources/exoscale.rst @@ -58,12 +58,12 @@ The settings available are: An example configuration with the default values is provided below: -.. sourcecode:: yaml - - datasource: - Exoscale: - metadata_url: "http://169.254.169.254" - api_version: "1.0" - password_server_port: 8080 - timeout: 10 - retries: 6 +.. code-block:: yaml + + datasource: + Exoscale: + metadata_url: "http://169.254.169.254" + api_version: "1.0" + password_server_port: 8080 + timeout: 10 + retries: 6 diff --git a/doc/rtd/topics/datasources/gce.rst b/doc/rtd/topics/datasources/gce.rst index 3aeb9afc..c3de32a5 100644 --- a/doc/rtd/topics/datasources/gce.rst +++ b/doc/rtd/topics/datasources/gce.rst @@ -30,12 +30,12 @@ The settings that may be configured are: An example configuration with the default values is provided below: -.. sourcecode:: yaml +.. 
code-block:: yaml - datasource: - GCE: - retries: 5 - sec_between_retries: 1 + datasource: + GCE: + retries: 5 + sec_between_retries: 1 .. _GCE metadata docs: https://cloud.google.com/compute/docs/storing-retrieving-metadata diff --git a/doc/rtd/topics/datasources/lxd.rst b/doc/rtd/topics/datasources/lxd.rst index 3b523d50..b7986228 100644 --- a/doc/rtd/topics/datasources/lxd.rst +++ b/doc/rtd/topics/datasources/lxd.rst @@ -59,15 +59,15 @@ Note: LXD version 4.22 introduced a new scope of config keys prefaced by By default, network configuration from this datasource will be: -.. code:: yaml - - version: 1 - config: - - type: physical - name: eth0 - subnets: - - type: dhcp - control: auto +.. code-block:: yaml + + version: 1 + config: + - type: physical + name: eth0 + subnets: + - type: dhcp + control: auto This datasource is intended to replace :ref:`datasource_nocloud` datasource for LXD instances with a more direct support for LXD APIs instead diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst index aedc0f58..cde64bb1 100644 --- a/doc/rtd/topics/datasources/nocloud.rst +++ b/doc/rtd/topics/datasources/nocloud.rst @@ -129,31 +129,31 @@ See an example below. Note specifically that this file does not have a top level ``network`` key as it is already assumed to be network configuration based on the filename. -.. code:: yaml - - version: 1 - config: - - type: physical - name: interface0 - mac_address: "52:54:00:12:34:00" - subnets: - - type: static - address: 192.168.1.10 - netmask: 255.255.255.0 - gateway: 192.168.1.254 - - -.. code:: yaml - - version: 2 - ethernets: - interface0: - match: - macaddress: "52:54:00:12:34:00" - set-name: interface0 - addresses: - - 192.168.1.10/255.255.255.0 - gateway4: 192.168.1.254 +.. 
code-block:: yaml + + version: 1 + config: + - type: physical + name: interface0 + mac_address: "52:54:00:12:34:00" + subnets: + - type: static + address: 192.168.1.10 + netmask: 255.255.255.0 + gateway: 192.168.1.254 + + +.. code-block:: yaml + + version: 2 + ethernets: + interface0: + match: + macaddress: "52:54:00:12:34:00" + set-name: interface0 + addresses: + - 192.168.1.10/255.255.255.0 + gateway4: 192.168.1.254 .. _iso9660: https://en.wikipedia.org/wiki/ISO_9660 diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst index 7818507a..286b2f44 100644 --- a/doc/rtd/topics/datasources/openstack.rst +++ b/doc/rtd/topics/datasources/openstack.rst @@ -48,15 +48,15 @@ The settings that may be configured are: An example configuration with the default values is provided below: -.. sourcecode:: yaml +.. code-block:: yaml - datasource: - OpenStack: - metadata_urls: ["http://169.254.169.254"] - max_wait: -1 - timeout: 10 - retries: 5 - apply_network_config: True + datasource: + OpenStack: + metadata_urls: ["http://169.254.169.254"] + max_wait: -1 + timeout: 10 + retries: 5 + apply_network_config: True Vendor Data @@ -74,9 +74,9 @@ data should work for vendor data. For example, configuring the following as vendor data in OpenStack would upgrade packages and install ``htop`` on all instances: -.. sourcecode:: json +.. 
code-block:: json - {"cloud-init": "#cloud-config\npackage_upgrade: True\npackages:\n - htop"} + {"cloud-init": "#cloud-config\npackage_upgrade: True\npackages:\n - htop"} For more general information about how cloud-init handles vendor data, including how it can be disabled by users on instances, see diff --git a/doc/rtd/topics/datasources/oracle.rst b/doc/rtd/topics/datasources/oracle.rst index 7e480021..4b4b23b2 100644 --- a/doc/rtd/topics/datasources/oracle.rst +++ b/doc/rtd/topics/datasources/oracle.rst @@ -39,11 +39,11 @@ The settings that may be configured are: An example configuration with the default values is provided below: -.. sourcecode:: yaml +.. code-block:: yaml - datasource: - Oracle: - configure_secondary_nics: false + datasource: + Oracle: + configure_secondary_nics: false .. _Oracle Compute Infrastructure: https://cloud.oracle.com/ .. vi: textwidth=79 diff --git a/doc/rtd/topics/datasources/smartos.rst b/doc/rtd/topics/datasources/smartos.rst index 6fe45c73..85bc7eff 100644 --- a/doc/rtd/topics/datasources/smartos.rst +++ b/doc/rtd/topics/datasources/smartos.rst @@ -70,37 +70,37 @@ Disabling user-script Cloud-init uses the per-boot script functionality to handle the execution of the user-script. If you want to prevent this use a cloud-config of: -.. code:: yaml - - #cloud-config - cloud_final_modules: - - scripts-per-once - - scripts-per-instance - - scripts-user - - ssh-authkey-fingerprints - - keys-to-console - - phone-home - - final-message - - power-state-change +.. code-block:: yaml + + #cloud-config + cloud_final_modules: + - scripts-per-once + - scripts-per-instance + - scripts-user + - ssh-authkey-fingerprints + - keys-to-console + - phone-home + - final-message + - power-state-change Alternatively you can use the json patch method -.. 
code:: yaml - - #cloud-config-jsonp - [ - { "op": "replace", - "path": "/cloud_final_modules", - "value": ["scripts-per-once", - "scripts-per-instance", - "scripts-user", - "ssh-authkey-fingerprints", - "keys-to-console", - "phone-home", - "final-message", - "power-state-change"] - } - ] +.. code-block:: yaml + + #cloud-config-jsonp + [ + { "op": "replace", + "path": "/cloud_final_modules", + "value": ["scripts-per-once", + "scripts-per-instance", + "scripts-user", + "ssh-authkey-fingerprints", + "keys-to-console", + "phone-home", + "final-message", + "power-state-change"] + } + ] The default cloud-config includes "script-per-boot". Cloud-init will still ingest and write the user-data but will not execute it, when you disable diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst index 23ef0dfe..68e68cb4 100644 --- a/doc/rtd/topics/debugging.rst +++ b/doc/rtd/topics/debugging.rst @@ -27,6 +27,11 @@ subcommands default to reading /var/log/cloud-init.log. .. code-block:: shell-session $ cloud-init analyze show -i my-cloud-init.log + +Example output: + +.. code-block:: shell-session + -- Boot Record 01 -- The total time elapsed since completing an event is printed after the "@" character. @@ -45,6 +50,11 @@ subcommands default to reading /var/log/cloud-init.log. .. code-block:: shell-session $ cloud-init analyze dump -i my-cloud-init.log + +Example output: + +.. code-block:: + [ { "description": "running config modules", @@ -61,6 +71,11 @@ subcommands default to reading /var/log/cloud-init.log. .. code-block:: shell-session $ cloud-init analyze blame -i my-cloud-init.log + +Example output: + +.. code-block:: + -- Boot Record 11 -- 00.01300s (modules-final/config-scripts-per-boot) 00.00400s (modules-final/config-final-message) @@ -73,6 +88,11 @@ subcommands default to reading /var/log/cloud-init.log. .. code-block:: shell-session $ cloud-init analyze boot + +Example output: + +.. 
code-block:: + -- Most Recent Boot Record -- Kernel Started at: 2019-06-13 15:59:55.809385 Kernel ended boot at: 2019-06-13 16:00:00.944740 @@ -155,10 +175,15 @@ commandline: .. code-block:: shell-session - $ sudo cloud-init single --name cc_ssh --frequency always - ... - Generating public/private ed25519 key pair - ... + $ sudo cloud-init single --name cc_ssh --frequency always + +Example output: + +.. code-block:: + + ... + Generating public/private ed25519 key pair + ... Inspect cloud-init.log for output of what operations were performed as a result. @@ -203,22 +228,23 @@ from **-proposed** hostname: SRU-worked-{{v1.cloud_name}} 2. Wait for current cloud-init to complete, replace `` with the IP - address of the VM that you launched in step 1: + address of the VM that you launched in step 1. Be sure to make a note of the + datasource cloud-init detected in --long output. You will need this during + step 5, where you will use it to confirm the same datasource is + detected after the upgrade: .. code-block:: bash CI_VM_IP= - # Make note of the datasource cloud-init detected in --long output. - # In step 5, you will use this to confirm the same datasource is detected after upgrade. - ssh ubuntu@$CI_VM_IP -- cloud-init status --wait --long + $ ssh ubuntu@$CI_VM_IP -- cloud-init status --wait --long 3. Set up the **-proposed** pocket on your VM and upgrade to the **-proposed** - cloud-init: + cloud-init. To do this, create the following bash script, which will + add the -proposed pocket to APT's sources and install cloud-init + from that pocket: .. code-block:: bash - # Create a script that will add the -proposed pocket to APT's sources - # and install cloud-init from that pocket cat > setup_proposed.sh <: + +.. code-block:: shell-session + + $ ssh ubuntu@$CI_VM_IP -- hostname + +Then, check for any errors or warnings in cloud-init logs. If successful, +this will produce no output: + +.. 
code-block:: shell-session - ssh ubuntu@$CI_VM_IP -- cloud-init status --wait --long - # Make sure hostname was set properly to SRU-worked- - ssh ubuntu@$CI_VM_IP -- hostname - # Check for any errors or warnings in cloud-init logs. - # (This should produce no output if successful.) - ssh ubuntu@$CI_VM_IP -- grep Trace "/var/log/cloud-init*" + $ ssh ubuntu@$CI_VM_IP -- grep Trace "/var/log/cloud-init*" 6. If you encounter an error during SRU testing: diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst index f92a22dd..f48801b6 100644 --- a/doc/rtd/topics/faq.rst +++ b/doc/rtd/topics/faq.rst @@ -93,6 +93,11 @@ To find what datasource is getting used run the `cloud-id` command: .. code-block:: shell-session $ cloud-id + +Which will tell you which datasource is being used, for example: + +.. code-block:: + nocloud If the cloud-id is not what is expected, then running the `ds-identify` @@ -119,10 +124,10 @@ cloud-init: .. code-block:: shell-session - $ sudo DI_LOG=stderr /usr/lib/cloud-init/ds-identify --force - $ sudo cloud-init clean --logs - $ sudo cloud-init init --local - $ sudo cloud-init init + $ sudo DI_LOG=stderr /usr/lib/cloud-init/ds-identify --force + $ sudo cloud-init clean --logs + $ sudo cloud-init init --local + $ sudo cloud-init init .. warning:: diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst index 7d75d168..6136753d 100644 --- a/doc/rtd/topics/format.rst +++ b/doc/rtd/topics/format.rst @@ -44,22 +44,24 @@ Typically used by those who just want to execute a shell script. Begins with: ``#!`` or ``Content-Type: text/x-shellscript`` when using a MIME archive. -.. note:: - New in cloud-init v. 18.4: User-data scripts can also render cloud instance - metadata variables using jinja templating. See - :ref:`instance_metadata` for more information. +User-data scripts can optionally render cloud instance metadata variables using +jinja templating. See :ref:`instance_metadata` for more information. 
-Example -------- +Example Script +-------------- -.. code-block:: shell-session +Create a script file ``myscript.sh`` that contains the following: - $ cat myscript.sh +.. code-block:: - #!/bin/sh - echo "Hello World. The time is now $(date -R)!" | tee /root/output.txt + #!/bin/sh + echo "Hello World. The time is now $(date -R)!" | tee /root/output.txt - $ euca-run-instances --key mykey --user-data-file myscript.sh ami-a07d95c9 +Now run: + +.. code-block:: shell-session + + $ euca-run-instances --key mykey --user-data-file myscript.sh ami-a07d95c9 Kernel Command Line =================== @@ -89,6 +91,11 @@ Supported content-types are listed from the cloud-init subcommand make-mime: .. code-block:: shell-session $ cloud-init devel make-mime --list-types + +Example output: + +.. code-block:: + cloud-boothook cloud-config cloud-config-archive diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst index b3e1df27..924ccf87 100644 --- a/doc/rtd/topics/network-config-format-v2.rst +++ b/doc/rtd/topics/network-config-format-v2.rst @@ -40,7 +40,7 @@ For example the following could be present in ethernets: [] It may also be provided in other locations including the -:ref:`datasource_nocloud`, see :ref:`default_behavior` for other places. +:ref:`datasource_nocloud`, see :ref:`network_config` for other places. Supported device ``types`` values are as follows: diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst index b6d7a9be..0327fa45 100644 --- a/doc/rtd/topics/network-config.rst +++ b/doc/rtd/topics/network-config.rst @@ -4,16 +4,6 @@ Network Configuration ********************* -- Default Behavior -- Disabling Network Configuration -- Fallback Networking -- Network Configuration Sources -- Network Configuration Outputs -- Network Output Policy -- Network Configuration Tools -- Examples - -.. _default_behavior: Default Behavior ================ @@ -40,9 +30,9 @@ network interface. .. 
note:: - The network-config value is expected to be a Base64 encoded YAML string in - :ref:`network_config_v1` or :ref:`network_config_v2` format. Optionally it - can be compressed with ``gzip`` prior to Base64 encoding. + The network-config value is expected to be a Base64 encoded YAML string in + :ref:`network_config_v1` or :ref:`network_config_v2` format. Optionally it + can be compressed with ``gzip`` prior to Base64 encoding. Disabling Network Configuration @@ -176,11 +166,11 @@ The following Datasources optionally provide network configuration: For more information on network configuration formats .. toctree:: - :maxdepth: 1 + :maxdepth: 1 - network-config-format-eni.rst - network-config-format-v1.rst - network-config-format-v2.rst + network-config-format-eni.rst + network-config-format-v1.rst + network-config-format-v2.rst Network Configuration Outputs @@ -292,26 +282,31 @@ Example output converting V2 to sysconfig: .. code-block:: shell-session - % tools/net-convert.py --network-data v2.yaml --kind yaml \ + $ tools/net-convert.py --network-data v2.yaml --kind yaml \ --output-kind sysconfig -d target - % cat target/etc/sysconfig/network-scripts/ifcfg-eth* - # Created by cloud-init on instance boot automatically, do not edit. - # - BOOTPROTO=static - DEVICE=eth7 - IPADDR=192.168.1.5/255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - # Created by cloud-init on instance boot automatically, do not edit. - # - BOOTPROTO=dhcp - DEVICE=eth9 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no + $ cat target/etc/sysconfig/network-scripts/ifcfg-eth* + +Example output: + +.. code-block:: + + # Created by cloud-init on instance boot automatically, do not edit. + # + BOOTPROTO=static + DEVICE=eth7 + IPADDR=192.168.1.5/255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + # Created by cloud-init on instance boot automatically, do not edit. 
+ # + BOOTPROTO=dhcp + DEVICE=eth9 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no .. _Cloud-init: https://launchpad.net/cloud-init diff --git a/doc/rtd/topics/tutorials/lxd.rst b/doc/rtd/topics/tutorials/lxd.rst index 7ffc80cc..67c97340 100644 --- a/doc/rtd/topics/tutorials/lxd.rst +++ b/doc/rtd/topics/tutorials/lxd.rst @@ -21,7 +21,7 @@ testing and iterating on our user data definition. Setup LXD ========= -Skip this section if you already have LXD_ setup. +Skip this section if you already have LXD_ set up. Install LXD ----------- @@ -46,7 +46,7 @@ be changed at a later time if needed. Define our user data ==================== -Now that LXD is setup, we can define our user data. Create the +Now that LXD is set up, we can define our user data. Create the following file on your local filesystem at ``/tmp/my-user-data``: .. code-block:: yaml @@ -87,15 +87,23 @@ successfully: .. code-block:: shell-session $ cloud-init status --wait - ..... - cloud-init status: done - $ + +Which provides the following output: + +.. code-block:: + + status: done We can now verify that cloud-init received the expected user data: .. code-block:: shell-session $ cloud-init query userdata + +Which should print the following to the terminal window: + +.. code-block:: + #cloud-config runcmd: - echo 'Hello, World!' > /var/tmp/hello-world.txt @@ -105,16 +113,24 @@ We can also assert the user data we provided is a valid cloud-config: .. code-block:: shell-session $ cloud-init schema --system --annotate + +Which should print the following: + +.. code-block:: + Valid cloud-config: system userdata - $ Finally, verify that our user data was applied successfully: .. code-block:: shell-session $ cat /var/tmp/hello-world.txt + +Which should then print: + +.. code-block:: + Hello, World! - $ We can see that cloud-init has consumed our user data successfully! 
-- cgit v1.2.1 From 6e725f36647407d201af0603d7db11fc96a93d4d Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 13 Dec 2022 10:55:23 -0600 Subject: Ensure network ready before cloud-init service runs on RHEL (#1893) LP: #1998655 --- systemd/cloud-init.service.tmpl | 1 + 1 file changed, 1 insertion(+) diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl index a9e180ee..c86aa4fc 100644 --- a/systemd/cloud-init.service.tmpl +++ b/systemd/cloud-init.service.tmpl @@ -16,6 +16,7 @@ After=networking.service "miraclelinux", "openEuler", "openmandriva", "rhel", "rocky", "virtuozzo"] %} After=network.service After=NetworkManager.service +After=NetworkManager-wait-online.service {% endif %} {% if variant in ["suse"] %} After=wicked.service -- cgit v1.2.1 From f10533119e8d429157d3016054f461ea01fe4a03 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Tue, 13 Dec 2022 15:23:08 -0700 Subject: test: mock file deletion in dhcp tests (#1911) Currently tests attempt to delete /run/dhclient.pid and /run/dhclient.leases from the host. This leads to host-dependent test failure. Fix it. --- tests/unittests/net/test_dhcp.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/tests/unittests/net/test_dhcp.py b/tests/unittests/net/test_dhcp.py index e4f57b50..40340553 100644 --- a/tests/unittests/net/test_dhcp.py +++ b/tests/unittests/net/test_dhcp.py @@ -376,11 +376,14 @@ class TestDHCPDiscoveryClean(CiTestCase): self.logs.getvalue(), ) + @mock.patch("cloudinit.net.dhcp.os.remove") @mock.patch("time.sleep", mock.MagicMock()) @mock.patch("cloudinit.net.dhcp.os.kill") @mock.patch("cloudinit.net.dhcp.subp.subp") @mock.patch("cloudinit.net.dhcp.util.wait_for_files", return_value=False) - def test_dhcp_discovery_warns_invalid_pid(self, m_wait, m_subp, m_kill): + def test_dhcp_discovery_warns_invalid_pid( + self, m_wait, m_subp, m_kill, m_remove + ): """dhcp_discovery logs a warning when pidfile contains invalid content. 
Lease processing still occurs and no proc kill is attempted. @@ -422,12 +425,13 @@ class TestDHCPDiscoveryClean(CiTestCase): ) m_kill.assert_not_called() + @mock.patch("cloudinit.net.dhcp.os.remove") @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid") @mock.patch("cloudinit.net.dhcp.os.kill") @mock.patch("cloudinit.net.dhcp.util.wait_for_files") @mock.patch("cloudinit.net.dhcp.subp.subp") def test_dhcp_discovery_waits_on_lease_and_pid( - self, m_subp, m_wait, m_kill, m_getppid + self, m_subp, m_wait, m_kill, m_getppid, m_remove ): """dhcp_discovery waits for the presence of pidfile and dhcp.leases.""" m_subp.return_value = ("", "") @@ -446,11 +450,12 @@ class TestDHCPDiscoveryClean(CiTestCase): ) m_kill.assert_not_called() + @mock.patch("cloudinit.net.dhcp.os.remove") @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid") @mock.patch("cloudinit.net.dhcp.os.kill") @mock.patch("cloudinit.net.dhcp.subp.subp") @mock.patch("cloudinit.util.wait_for_files", return_value=False) - def test_dhcp_discovery(self, m_wait, m_subp, m_kill, m_getppid): + def test_dhcp_discovery(self, m_wait, m_subp, m_kill, m_getppid, m_remove): """dhcp_discovery brings up the interface and runs dhclient. It also returns the parsed dhcp.leases file. 
@@ -508,11 +513,14 @@ class TestDHCPDiscoveryClean(CiTestCase): ) m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)]) + @mock.patch("cloudinit.net.dhcp.os.remove") @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid") @mock.patch("cloudinit.net.dhcp.os.kill") @mock.patch("cloudinit.net.dhcp.subp.subp") @mock.patch("cloudinit.util.wait_for_files") - def test_dhcp_output_error_stream(self, m_wait, m_subp, m_kill, m_getppid): + def test_dhcp_output_error_stream( + self, m_wait, m_subp, m_kill, m_getppid, m_remove + ): """ "dhcp_log_func is called with the output and error streams of dhclient when the callable is passed.""" dhclient_err = "FAKE DHCLIENT ERROR" -- cgit v1.2.1 From 0bfed1dce38f2fa21c06b6f3037e47911c029f4b Mon Sep 17 00:00:00 2001 From: sxt1001 Date: Thu, 15 Dec 2022 05:56:49 +0800 Subject: add utility function test cases (#1910) Add coverage for: - atomic_helper.write_file() - util.human2bytes() --- tests/unittests/test_atomic_helper.py | 10 ++++++++++ tests/unittests/test_util.py | 13 +++++++++++++ 2 files changed, 23 insertions(+) diff --git a/tests/unittests/test_atomic_helper.py b/tests/unittests/test_atomic_helper.py index 684a9ae5..1c5deddf 100644 --- a/tests/unittests/test_atomic_helper.py +++ b/tests/unittests/test_atomic_helper.py @@ -30,6 +30,16 @@ class TestAtomicHelper(CiTestCase): atomic_helper.write_file(path, contents, mode=0o400) self.check_file(path, contents, perms=0o400) + def test_file_preserve_permissions(self): + """create a file with mode 700, then write_file with mode 644.""" + path = self.tmp_path("test_file_preserve_permissions") + contents = b"test_file_perms" + with open(path, mode="wb") as f: + f.write(b"test file preserve permissions") + os.chmod(f.name, 0o700) + atomic_helper.write_file(path, contents, preserve_mode=True) + self.check_file(path, contents, perms=0o700) + def test_write_json(self): """write_json output is readable json.""" path = self.tmp_path("test_write_json") diff --git 
a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 0c2735ae..cc516942 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -2636,6 +2636,19 @@ class TestGetProcEnv(helpers.TestCase): assert ppid == util.get_proc_ppid("mocked") +class TestHuman2Bytes: + """test util.human2bytes() function""" + + def test_human2bytes(self): + assert util.human2bytes("0.5G") == 536870912 + assert util.human2bytes("100B") == 100 + assert util.human2bytes("100MB") == 104857600 + + for test_i in ["-100MB", "100b", "100mB"]: + with pytest.raises(ValueError): + util.human2bytes(test_i) + + class TestKernelVersion: """test kernel version function""" -- cgit v1.2.1 From 5f5c3e196d3cd4c6b491fa5ab76da141684a81dc Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Thu, 15 Dec 2022 08:42:32 -0700 Subject: mounts: document weird prefix in schema (#1913) Add test and support for parsing IEC prefix format. --- cloudinit/config/schemas/schema-cloud-config-v1.json | 2 +- cloudinit/util.py | 18 ++++++++++++++++-- tests/unittests/test_util.py | 5 +++++ 3 files changed, 22 insertions(+), 3 deletions(-) diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json index a91dc482..7ff80ce3 100644 --- a/cloudinit/config/schemas/schema-cloud-config-v1.json +++ b/cloudinit/config/schemas/schema-cloud-config-v1.json @@ -1764,7 +1764,7 @@ "description": "Path to the swap file to create" }, "size": { - "description": "The size in bytes of the swap file, 'auto' or a human-readable size abbreviation of the format where units are one of B, K, M, G or T.", + "description": "The size in bytes of the swap file, 'auto' or a human-readable size abbreviation of the format where units are one of B, K, M, G or T. **WARNING: Attempts to use IEC prefixes in your configuration prior to cloud-init version 23.1 will result in unexpected behavior. 
SI prefixes names (KB, MB) are required on pre-23.1 cloud-init, however IEC values are used. In summary, assume 1KB == 1024B, not 1000B**", "oneOf": [ { "enum": [ diff --git a/cloudinit/util.py b/cloudinit/util.py index 078ce1c2..96cd1b74 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2761,11 +2761,25 @@ def read_meminfo(meminfo="/proc/meminfo", raw=False): def human2bytes(size): """Convert human string or integer to size in bytes + + In the original implementation, SI prefixes parse to IEC values + (1KB=1024B). Later, support for parsing IEC prefixes was added, + also parsing to IEC values (1KiB=1024B). To maintain backwards + compatibility for the long-used implementation, no fix is provided for SI + prefixes (to make 1KB=1000B may now violate user expectations). + + Future prospective callers of this function should consider implementing a + new function with more standard expectations (1KB=1000B and 1KiB=1024B) + + Examples: 10M => 10485760 - .5G => 536870912 + 10MB => 10485760 + 10MiB => 10485760 """ size_in = size - if size.endswith("B"): + if size.endswith("iB"): + size = size[:-2] + elif size.endswith("B"): size = size[:-1] mpliers = {"B": 1, "K": 2**10, "M": 2**20, "G": 2**30, "T": 2**40} diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index cc516942..fe933c0a 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -2648,6 +2648,11 @@ class TestHuman2Bytes: with pytest.raises(ValueError): util.human2bytes(test_i) + def test_ibibytes2bytes(self): + + assert util.human2bytes("0.5GiB") == 536870912 + assert util.human2bytes("100MiB") == 104857600 + class TestKernelVersion: """test kernel version function""" -- cgit v1.2.1 From 6bdd88a7f1d69dc4aac9fa51d663c4b11e3b5217 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 15 Dec 2022 08:52:02 -0700 Subject: read-version: When insufficient tags, use cloudinit.version.get_version --- tools/read-version | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 
deletion(-) diff --git a/tools/read-version b/tools/read-version index 50f91b5d..3a0561a3 100755 --- a/tools/read-version +++ b/tools/read-version @@ -90,7 +90,16 @@ if is_gitdir(_tdir) and which("git") and not is_release_branch_ci: branch_name, ] + flags - version = tiny_p(cmd).strip() + try: + version = tiny_p(cmd).strip() + version_long = tiny_p(cmd + ["--long"]).strip() + except subprocess.CalledProcessError as e: + if "No tags can describe" in e.stderr: + print(f"{cmd} found no tags. Using cloudinit.verison.py ") + version = src_version + version_long = "" + else: + raise version_long = tiny_p(cmd + ["--long"]).strip() else: version = src_version -- cgit v1.2.1 From 99c0750fe954a1c166d7dd7c3e9757c2449a5e48 Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Thu, 15 Dec 2022 17:13:17 +0100 Subject: doc: improve cc_write_files doc (#1916) --- cloudinit/config/cc_write_files.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index f7d89935..f59b2d54 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -37,15 +37,18 @@ meta: MetaSchema = { before being written. For empty file creation, content can be omitted. .. note:: - if multiline data is provided, care should be taken to ensure that it - follows yaml formatting standards. to specify binary data, use the yaml + If multiline data is provided, care should be taken to ensure that it + follows yaml formatting standards. To specify binary data, use the yaml option ``!!binary`` .. note:: Do not write files under /tmp during boot because of a race with systemd-tmpfiles-clean that can cause temp files to get cleaned during the early boot process. Use /run/somedir instead to avoid race - LP:1707222.""" + LP:1707222. + + .. 
warning:: + Existing files will be overridden.""" ), "distros": ["all"], "examples": [ -- cgit v1.2.1 From 3a2232084f53fdae901e2bf34726466b8809be5a Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 15 Dec 2022 12:13:46 -0700 Subject: tools: read-version drop extra call to git describe --long --- tools/read-version | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/read-version b/tools/read-version index 3a0561a3..5a71e6c7 100755 --- a/tools/read-version +++ b/tools/read-version @@ -100,7 +100,6 @@ if is_gitdir(_tdir) and which("git") and not is_release_branch_ci: version_long = "" else: raise - version_long = tiny_p(cmd + ["--long"]).strip() else: version = src_version version_long = "" -- cgit v1.2.1 From 040090a31f483014ec78cd353bb55bfae121bebd Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 15 Dec 2022 13:03:36 -0700 Subject: nocloud: add support for dmi variable expansion for seedfrom URL NoCloud meta-data seedfrom (or kernel commandline seedfrom) URL can now provide variable expansion for system-specific DMI values as seen in /sys/class/dmi/id on Linux or kenv on FreeBSD platforms. Variable names of the format __dmi.SOME_VAR__ will be replaced when determining the URL from which NoCloud datasource GETs its user-data and meta-data. This allows for a common templated seedfrom URL which can be reused for mass deployments, but can allow for unique URLs based on classes of DMI system characteristics such as chassis serial, product name, UUID etc. 
LP: #1994980 --- cloudinit/dmi.py | 28 ++++++++ cloudinit/features.py | 11 +++ cloudinit/sources/DataSourceNoCloud.py | 5 +- cloudinit/util.py | 13 ++-- doc/examples/cloud-config-datasources.txt | 2 +- doc/rtd/topics/datasources/nocloud.rst | 48 ++++++++++--- tests/unittests/test_dmi.py | 51 ++++++++++++++ tests/unittests/test_util.py | 111 ++++++++++++++++++++++++++---- 8 files changed, 239 insertions(+), 30 deletions(-) diff --git a/cloudinit/dmi.py b/cloudinit/dmi.py index dff9ab0f..edc64e5c 100644 --- a/cloudinit/dmi.py +++ b/cloudinit/dmi.py @@ -1,5 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. import os +import re from collections import namedtuple from typing import Optional @@ -182,4 +183,31 @@ def read_dmi_data(key: str) -> Optional[str]: return None +def sub_dmi_vars(src: str) -> str: + """Replace __dmi.VARNAME__ with DMI values from either sysfs or kenv.""" + if "__" not in src: + return src + valid_dmi_keys = DMIDECODE_TO_KERNEL.keys() + for match in re.findall(r"__dmi\.([^_]+)__", src): + if match not in valid_dmi_keys: + LOG.warning( + "Ignoring invalid __dmi.%s__ in %s. Expected one of: %s.", + match, + src, + valid_dmi_keys, + ) + continue + dmi_value = read_dmi_data(match) + if not dmi_value: + dmi_value = "" + LOG.debug( + "Replacing __dmi.%s__ in '%s' with '%s'.", + match, + src, + dmi_value, + ) + src = src.replace(f"__dmi.{match}__", dmi_value) + return src + + # vi: ts=4 expandtab diff --git a/cloudinit/features.py b/cloudinit/features.py index 62b22e3a..471a4331 100644 --- a/cloudinit/features.py +++ b/cloudinit/features.py @@ -69,6 +69,17 @@ world-readable. Prior to 23.1, netplan configuration is world-readable. (This flag can be removed after Jammy is no longer supported.) """ + +NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH = True +""" +Append a forward slash '/' if NoCloud seedurl does not end with either +a querystring or forward slash. 
Prior to 23.1, nocloud seedurl would be used +unaltered, appending meta-data, user-data and vendor-data to without URL path +separators. + +(This flag can be removed when Jammy is no longer supported.) +""" + try: # pylint: disable=wildcard-import from cloudinit.feature_overrides import * # noqa diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index fba6aaae..a32bd4d0 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -156,7 +156,7 @@ class DataSourceNoCloud(sources.DataSource): # The special argument "seedfrom" indicates we should # attempt to seed the userdata / metadata from its value # its primarily value is in allowing the user to type less - # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg + # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg/ if "seedfrom" in mydata["meta-data"]: seedfrom = mydata["meta-data"]["seedfrom"] seedfound = False @@ -167,6 +167,9 @@ class DataSourceNoCloud(sources.DataSource): if not seedfound: LOG.debug("Seed from %s not supported by %s", seedfrom, self) return False + # check and replace instances of known dmi. 
such as + # chassis-serial-number or baseboard-product-name + seedfrom = dmi.sub_dmi_vars(seedfrom) # This could throw errors, but the user told us to do it # so if errors are raised, let them raise diff --git a/cloudinit/util.py b/cloudinit/util.py index 96cd1b74..2ba0e077 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -38,7 +38,7 @@ from functools import lru_cache, total_ordering from typing import Callable, Deque, Dict, List, TypeVar from urllib import parse -from cloudinit import importer +from cloudinit import features, importer from cloudinit import log as logging from cloudinit import ( mergers, @@ -976,14 +976,17 @@ def load_yaml(blob, default=None, allowed=(dict,)): def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): if base.find("%s") >= 0: - ud_url = base % ("user-data" + ext) - vd_url = base % ("vendor-data" + ext) - md_url = base % ("meta-data" + ext) + ud_url = base.replace("%s", "user-data" + ext) + vd_url = base.replace("%s", "vendor-data" + ext) + md_url = base.replace("%s", "meta-data" + ext) else: + if features.NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH: + if base[-1] != "/" and parse.urlparse(base).query == "": + # Append fwd slash when no query string and no %s + base += "/" ud_url = "%s%s%s" % (base, "user-data", ext) vd_url = "%s%s%s" % (base, "vendor-data", ext) md_url = "%s%s%s" % (base, "meta-data", ext) - md_resp = url_helper.read_file_or_url( md_url, timeout=timeout, retries=retries ) diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index 7a8c4284..43b34418 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -31,7 +31,7 @@ datasource: # default seedfrom is None # if found, then it should contain a url with: # /user-data and /meta-data - # seedfrom: http://my.example.com/i-abcde + # seedfrom: http://my.example.com/i-abcde/ seedfrom: None # fs_label: the label on filesystems to be searched for NoCloud source 
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst index cde64bb1..67a4ea67 100644 --- a/doc/rtd/topics/datasources/nocloud.rst +++ b/doc/rtd/topics/datasources/nocloud.rst @@ -32,18 +32,46 @@ The permitted keys are: With ``ds=nocloud``, the ``seedfrom`` value must start with ``/`` or ``file://``. With ``ds=nocloud-net``, the ``seedfrom`` value must start -with ``http://`` or ``https://``. - -e.g. you can pass this option to QEMU: +with ``http://`` or ``https://`` and end with a trailing ``/``. + +Cloud-init performs variable expansion of the seedfrom URL for any DMI kernel +variables present in ``/sys/class/dmi/id`` (kenv on FreeBSD). +Your ``seedfrom`` URL can contain variable names of the format +``__dmi.varname__`` to indicate to cloud-init NoCloud datasource that +dmi.varname should be expanded to the value of the DMI system attribute wanted. + +.. list-table:: Available DMI variables for expansion in ``seedfrom`` URL + :widths: 35 35 30 + :header-rows: 0 + + * - ``dmi.baseboard-asset-tag`` + - ``dmi.baseboard-manufacturer`` + - ``dmi.baseboard-version`` + * - ``dmi.bios-release-date`` + - ``dmi.bios-vendor`` + - ``dmi.bios-version`` + * - ``dmi.chassis-asset-tag`` + - ``dmi.chassis-manufacturer`` + - ``dmi.chassis-serial-number`` + * - ``dmi.chassis-version`` + - ``dmi.system-manufacturer`` + - ``dmi.system-product-name`` + * - ``dmi.system-serial-number`` + - ``dmi.system-uuid`` + - ``dmi.system-version`` + + +For example, passing this option to QEMU :: - -smbios type=1,serial=ds=nocloud-net;s=http://10.10.0.1:8000/ + -smbios type=1,serial=ds=nocloud-net;s=http://10.10.0.1:8000/__dmi.chassis-serial-number__/ -to cause NoCloud to fetch the full meta-data from http://10.10.0.1:8000/meta-data +causes NoCloud to fetch the full meta-data from a URL based on YOUR_SERIAL_NUMBER as seen in `/sys/class/dmi/id/chassis_serial_number` (kenv on FreeBSD) from http://10.10.0.1:8000/YOUR_SERIAL_NUMBER/meta-data after the network 
initialization is complete. -These user-data and meta-data files are expected to be in the following format. +These user-data and meta-data files are required as separate files at the same +base URL. :: @@ -52,10 +80,12 @@ These user-data and meta-data files are expected to be in the following format. Both files are required to be present for it to be considered a valid seed ISO. -Basically, user-data is simply user-data and meta-data is a YAML formatted file -representing what you'd find in the EC2 metadata service. +Basically, user-data is simply :ref:`user data` and +meta-data is a YAML formatted file representing what you'd find in the EC2 +metadata service. -You may also optionally provide a vendor-data file in the following format. +You may also optionally provide a vendor-data file as a separate file adhering +to :ref:`user data formats` in the same base URL. :: diff --git a/tests/unittests/test_dmi.py b/tests/unittests/test_dmi.py index 91d424c1..698e3df8 100644 --- a/tests/unittests/test_dmi.py +++ b/tests/unittests/test_dmi.py @@ -3,6 +3,8 @@ import shutil import tempfile from unittest import mock +import pytest + from cloudinit import dmi, subp, util from tests.unittests import helpers @@ -168,3 +170,52 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase): key, val = ("system-product-name", "my_product") self._configure_kenv_return(key, val) self.assertEqual(dmi.read_dmi_data(key), val) + + +class TestSubDMIVars: + + DMI_SRC = ( + "dmi.nope__dmi.system-uuid__/__dmi.uuid____dmi.smbios.system.uuid__" + ) + + @pytest.mark.parametrize( + "is_freebsd, src, read_dmi_data_mocks, warnings, expected", + ( + pytest.param( + False, + DMI_SRC, + [mock.call("system-uuid")], + [ + "Ignoring invalid __dmi.smbios.system.uuid__", + "Ignoring invalid __dmi.uuid__", + ], + "dmi.nope1/__dmi.uuid____dmi.smbios.system.uuid__", + id="match_dmi_distro_agnostic_strings_warn_on_unknown", + ), + pytest.param( + True, + DMI_SRC, + [mock.call("system-uuid")], + [ + "Ignoring 
invalid __dmi.smbios.system.uuid__", + "Ignoring invalid __dmi.uuid__", + ], + "dmi.nope1/__dmi.uuid____dmi.smbios.system.uuid__", + id="match_dmi_agnostic_and_freebsd_dmi_keys_warn_on_unknown", + ), + ), + ) + def test_sub_dmi_vars( + self, is_freebsd, src, read_dmi_data_mocks, warnings, expected, caplog + ): + with mock.patch.object(dmi, "read_dmi_data") as m_dmi: + m_dmi.side_effect = [ + "1", + "2", + RuntimeError("Too many read_dmi_data calls"), + ] + with mock.patch.object(dmi, "is_FreeBSD", return_value=is_freebsd): + assert expected == dmi.sub_dmi_vars(src) + for warning in warnings: + assert 1 == caplog.text.count(warning) + assert m_dmi.call_args_list == read_dmi_data_mocks diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index fe933c0a..eed387d5 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -16,11 +16,12 @@ import tempfile from collections import deque from textwrap import dedent from unittest import mock +from urllib.parse import urlparse import pytest import yaml -from cloudinit import importer, subp, util +from cloudinit import features, importer, subp, url_helper, util from cloudinit.helpers import Paths from cloudinit.sources import DataSourceHostname from cloudinit.subp import SubpResult @@ -2287,25 +2288,107 @@ class TestMessageFromString(helpers.TestCase): self.assertNotIn("\x00", roundtripped) -class TestReadSeeded(helpers.TestCase): - def setUp(self): - super(TestReadSeeded, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - - def test_unicode_not_messed_up(self): +class TestReadSeeded: + def test_unicode_not_messed_up(self, tmpdir): ud = b"userdatablob" vd = b"vendordatablob" helpers.populate_dir( - self.tmp, + tmpdir.strpath, {"meta-data": "key1: val1", "user-data": ud, "vendor-data": vd}, ) - sdir = self.tmp + os.path.sep - (found_md, found_ud, found_vd) = util.read_seeded(sdir) + (found_md, found_ud, found_vd) = util.read_seeded( + 
tmpdir.strpath + os.path.sep + ) + assert found_md == {"key1": "val1"} + assert found_ud == ud + assert found_vd == vd - self.assertEqual(found_md, {"key1": "val1"}) - self.assertEqual(found_ud, ud) - self.assertEqual(found_vd, vd) + @pytest.mark.parametrize( + "base, feature_flag, req_urls", + ( + pytest.param( + "http://10.0.0.1/%s?qs=1", + True, + [ + "http://10.0.0.1/meta-data?qs=1", + "http://10.0.0.1/user-data?qs=1", + "http://10.0.0.1/vendor-data?qs=1", + ], + id="expand_percent_s_to_data_route", + ), + pytest.param( + "https://10.0.0.1:8008/", + True, + [ + "https://10.0.0.1:8008/meta-data", + "https://10.0.0.1:8008/user-data", + "https://10.0.0.1:8008/vendor-data", + ], + id="no_duplicate_forward_slash_when_already_present", + ), + pytest.param( + "https://10.0.0.1:8008", + True, + [ + "https://10.0.0.1:8008/meta-data", + "https://10.0.0.1:8008/user-data", + "https://10.0.0.1:8008/vendor-data", + ], + id="append_fwd_slash_on_routes_when_absent_and_no_query_str", + ), + pytest.param( + "https://10.0.0.1:8008", + False, + [ + "https://10.0.0.1:8008meta-data", + "https://10.0.0.1:8008user-data", + "https://10.0.0.1:8008vendor-data", + ], + id="feature_off_append_fwd_slash_when_absent_and_no_query_str", + ), + pytest.param( + "https://10.0.0.1:8008?qs=", + True, + [ + "https://10.0.0.1:8008?qs=meta-data", + "https://10.0.0.1:8008?qs=user-data", + "https://10.0.0.1:8008?qs=vendor-data", + ], + id="avoid_trailing_forward_slash_on_routes_with_query_strings", + ), + ), + ) + @mock.patch(M_PATH + "url_helper.read_file_or_url") + def test_handle_http_urls( + self, m_read, base, feature_flag, req_urls, tmpdir + ): + def fake_response(url, timeout, retries): + parsed_url = urlparse(url) + path = parsed_url.path + if not path: + if parsed_url.query: + _key, _, md_type = parsed_url.query.partition("=") + else: + _url, _, md_type = parsed_url.netloc.partition("8008") + path = f"/{md_type}" + return url_helper.StringResponse(f"{path}: 1") + + m_read.side_effect = 
fake_response + + with mock.patch.object( + features, + "NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH", + feature_flag, + ): + (found_md, found_ud, found_vd) = util.read_seeded(base) + # Meta-data treated as YAML + assert found_md == {"/meta-data": 1} + # user-data, vendor-data read raw. It could be scripts or other format + assert found_ud == "/user-data: 1" + assert found_vd == "/vendor-data: 1" + assert [ + mock.call(req_url, timeout=5, retries=10) for req_url in req_urls + ] == m_read.call_args_list class TestReadSeededWithoutVendorData(helpers.TestCase): -- cgit v1.2.1 From 7368071568f444b576b49ffe9ef83e3928c9e8bb Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Sat, 24 Dec 2022 20:24:00 -0700 Subject: pycloudlib: add lunar support for integration tests (#1928) Bump to fe7facd3 --- integration-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-requirements.txt b/integration-requirements.txt index 8db89d2a..e539d4ac 100644 --- a/integration-requirements.txt +++ b/integration-requirements.txt @@ -1,5 +1,5 @@ # PyPI requirements for cloud-init integration testing # https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html # -pycloudlib @ git+https://github.com/canonical/pycloudlib.git@898049262cc0b71a142f13f623d3df679a1ec5c9 +pycloudlib @ git+https://github.com/canonical/pycloudlib.git@fe7facd3676b6f125bd7ab7e2141a48c714d77a8 pytest -- cgit v1.2.1 From 0bcb8048a66959bcddd99f5959f69917b3bc5bc9 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Tue, 3 Jan 2023 09:08:23 -0700 Subject: test: mock dns calls (#1922) Add fixture to disallow dns lookups by default in a common utility function. 
--- tests/unittests/config/test_apt_source_v3.py | 3 +++ tests/unittests/conftest.py | 16 ++++++++++++++++ tests/unittests/sources/test_aliyun.py | 3 ++- tests/unittests/sources/test_ec2.py | 7 +++++++ tests/unittests/sources/test_openstack.py | 7 +++++++ tox.ini | 1 + 6 files changed, 36 insertions(+), 1 deletion(-) diff --git a/tests/unittests/config/test_apt_source_v3.py b/tests/unittests/config/test_apt_source_v3.py index 5bb87385..8d7ba5dc 100644 --- a/tests/unittests/config/test_apt_source_v3.py +++ b/tests/unittests/config/test_apt_source_v3.py @@ -14,6 +14,8 @@ import tempfile from unittest import TestCase, mock from unittest.mock import call +import pytest + from cloudinit import gpg, subp, util from cloudinit.config import cc_apt_configure from tests.unittests import helpers as t_help @@ -955,6 +957,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.assertEqual(mirrors["PRIMARY"], pmir) self.assertEqual(mirrors["SECURITY"], smir) + @pytest.mark.allow_dns_lookup def test_apt_v3_url_resolvable(self): """test_apt_v3_url_resolvable - Test resolving urls""" diff --git a/tests/unittests/conftest.py b/tests/unittests/conftest.py index 1ab17e8b..a3daaf22 100644 --- a/tests/unittests/conftest.py +++ b/tests/unittests/conftest.py @@ -2,6 +2,7 @@ import builtins import glob import os from pathlib import Path +from unittest import mock import pytest @@ -58,6 +59,21 @@ def fake_filesystem(mocker, tmpdir): mocker.patch.object(mod, f, trap_func) +@pytest.fixture(autouse=True) +def disable_dns_lookup(request): + if "allow_dns_lookup" in request.keywords: + yield + return + + def side_effect(args, *other_args, **kwargs): + raise AssertionError("Unexpectedly used util.is_resolvable") + + with mock.patch( + "cloudinit.util.is_resolvable", side_effect=side_effect, autospec=True + ): + yield + + PYTEST_VERSION_TUPLE = tuple(map(int, pytest.__version__.split("."))) if PYTEST_VERSION_TUPLE < (3, 9, 0): diff --git a/tests/unittests/sources/test_aliyun.py 
b/tests/unittests/sources/test_aliyun.py index 6ceaf8f4..f95923a4 100644 --- a/tests/unittests/sources/test_aliyun.py +++ b/tests/unittests/sources/test_aliyun.py @@ -161,8 +161,9 @@ class TestAliYunDatasource(test_helpers.ResponsesTestCase): self.default_metadata["hostname"], self.ds.get_hostname().hostname ) + @mock.patch("cloudinit.sources.DataSourceEc2.util.is_resolvable") @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") - def test_with_mock_server(self, m_is_aliyun): + def test_with_mock_server(self, m_is_aliyun, m_resolv): m_is_aliyun.return_value = True self.regist_default_server() ret = self.ds.get_data() diff --git a/tests/unittests/sources/test_ec2.py b/tests/unittests/sources/test_ec2.py index 4dd7c497..3fe525e3 100644 --- a/tests/unittests/sources/test_ec2.py +++ b/tests/unittests/sources/test_ec2.py @@ -5,6 +5,7 @@ import json import threading from unittest import mock +import pytest import requests import responses @@ -223,6 +224,12 @@ TAGS_METADATA_2021_03_23: dict = { } +@pytest.fixture(autouse=True) +def disable_is_resolvable(): + with mock.patch("cloudinit.sources.DataSourceEc2.util.is_resolvable"): + yield + + def _register_ssh_keys(rfunc, base_url, keys_data): """handle ssh key inconsistencies. diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py index 8bcecae7..75a0dda1 100644 --- a/tests/unittests/sources/test_openstack.py +++ b/tests/unittests/sources/test_openstack.py @@ -10,6 +10,7 @@ import re from io import StringIO from urllib.parse import urlparse +import pytest import responses from cloudinit import helpers, settings, util @@ -76,6 +77,12 @@ EC2_VERSIONS = [ MOCK_PATH = "cloudinit.sources.DataSourceOpenStack." +@pytest.fixture(autouse=True) +def mock_is_resolvable(): + with mock.patch(f"{MOCK_PATH}util.is_resolvable"): + yield + + # TODO _register_uris should leverage test_ec2.register_mock_metaserver. 
def _register_uris(version, ec2_files, ec2_meta, os_files, *, responses_mock): """Registers a set of url patterns into responses that will mimic the diff --git a/tox.ini b/tox.ini index dd7973b7..3b668bc0 100644 --- a/tox.ini +++ b/tox.ini @@ -318,3 +318,4 @@ markers = ubuntu: this test should run on Ubuntu unstable: skip this test because it is flakey user_data: the user data to be passed to the test instance + allow_dns_lookup: disable autochecking for host network configuration -- cgit v1.2.1 From 36646706e14219bc43958972c5ffd4cf69b5eb3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mina=20Gali=C4=87?= Date: Tue, 3 Jan 2023 16:31:26 +0000 Subject: BSD: fix duplicate macs in Ifconfig parser (#1917) BSD: fix duplicate macs in Ifconfig parser Some cloud providers can have more than one device with the same MAC address. This PR allows parsing and storing and retrieving such configurations. We now use `defaultdict` to retrieve `ifs_by_mac`, before converting it all to a `dict`. We also store the two in separate variables. Add test case from Azure to verify, and test data in a new file, since our old cloudinit.net functions can't handle it. 
Sponsored by: FreeBSD Foundation --- cloudinit/distros/parsers/ifconfig.py | 26 +++++++++++----------- .../netinfo/freebsd-duplicate-macs-ifconfig-output | 13 +++++++++++ tests/data/netinfo/freebsd-ifconfig-output | 2 +- tests/unittests/distros/test_ifconfig.py | 16 +++++++++++++ 4 files changed, 43 insertions(+), 14 deletions(-) create mode 100644 tests/data/netinfo/freebsd-duplicate-macs-ifconfig-output diff --git a/cloudinit/distros/parsers/ifconfig.py b/cloudinit/distros/parsers/ifconfig.py index 35f728e0..3e57e41a 100644 --- a/cloudinit/distros/parsers/ifconfig.py +++ b/cloudinit/distros/parsers/ifconfig.py @@ -6,9 +6,10 @@ import copy import re +from collections import defaultdict from functools import lru_cache from ipaddress import IPv4Address, IPv4Interface, IPv6Interface -from typing import Dict, Optional, Tuple +from typing import Dict, List, Optional, Tuple, Union from cloudinit import log as logging @@ -87,10 +88,11 @@ class Ifconfig: """ def __init__(self): - self._ifs = {} + self._ifs_by_name = {} + self._ifs_by_mac = {} @lru_cache() - def parse(self, text: str) -> Dict[str, Ifstate]: + def parse(self, text: str) -> Dict[str, Union[Ifstate, List[Ifstate]]]: """ Parse the ``ifconfig -a`` output ``text``, into a dict of ``Ifstate`` objects, referenced by ``name`` *and* by ``mac`` address. 
@@ -104,6 +106,7 @@ class Ifconfig: @returns: A dict of ``Ifstate``s, referenced by ``name`` and ``mac`` """ ifindex = 0 + ifs_by_mac = defaultdict(list) for line in text.splitlines(): if len(line) == 0: continue @@ -119,7 +122,7 @@ class Ifconfig: curif = curif[:-1] dev = Ifstate(curif) dev.index = ifindex - self._ifs[curif] = dev + self._ifs_by_name[curif] = dev toks = line.lower().strip().split() @@ -157,10 +160,10 @@ class Ifconfig: if toks[0] == "ether": dev.mac = toks[1] dev.macs.append(toks[1]) - self._ifs[toks[1]] = dev + ifs_by_mac[toks[1]].append(dev) if toks[0] == "hwaddr": dev.macs.append(toks[1]) - self._ifs[toks[1]] = dev + ifs_by_mac[toks[1]].append(dev) if toks[0] == "groups:": dev.groups += toks[1:] @@ -195,17 +198,14 @@ class Ifconfig: if toks[i] == "interface:": dev.vlan["link"] = toks[i + 1] - return self._ifs + self._ifs_by_mac = dict(ifs_by_mac) + return {**self._ifs_by_name, **self._ifs_by_mac} def ifs_by_name(self): - return { - k: v for (k, v) in self._ifs.items() if not re.fullmatch(MAC_RE, k) - } + return self._ifs_by_name def ifs_by_mac(self): - return { - k: v for (k, v) in self._ifs.items() if re.fullmatch(MAC_RE, k) - } + return self._ifs_by_mac def _parse_inet(self, toks: list) -> Tuple[str, dict]: broadcast = None diff --git a/tests/data/netinfo/freebsd-duplicate-macs-ifconfig-output b/tests/data/netinfo/freebsd-duplicate-macs-ifconfig-output new file mode 100644 index 00000000..769fe6df --- /dev/null +++ b/tests/data/netinfo/freebsd-duplicate-macs-ifconfig-output @@ -0,0 +1,13 @@ +hn0: flags=8843 metric 0 mtu 1500 + options=8051b + ether 00:0d:3a:54:ad:1e + inet 10.0.0.35 netmask 0xffffff00 broadcast 10.0.0.255 + media: Ethernet 100GBase-CR4 + status: active + nd6 options=29 +mce0: flags=8a43 metric 0 mtu 1500 + options=8805bb + ether 00:0d:3a:54:ad:1e + media: Ethernet 100GBase-CR4 + status: active + nd6 options=29 \ No newline at end of file diff --git a/tests/data/netinfo/freebsd-ifconfig-output 
b/tests/data/netinfo/freebsd-ifconfig-output index 3ca0d2b2..05a69b88 100644 --- a/tests/data/netinfo/freebsd-ifconfig-output +++ b/tests/data/netinfo/freebsd-ifconfig-output @@ -38,4 +38,4 @@ lo0: flags=8049 metric 0 mtu 16384 inet6 fe80::1%lo0 prefixlen 64 scopeid 0x2 inet 127.0.0.1 netmask 0xff000000 groups: lo - nd6 options=21 + nd6 options=21 \ No newline at end of file diff --git a/tests/unittests/distros/test_ifconfig.py b/tests/unittests/distros/test_ifconfig.py index ce595746..c5c1dee9 100644 --- a/tests/unittests/distros/test_ifconfig.py +++ b/tests/unittests/distros/test_ifconfig.py @@ -38,6 +38,22 @@ class TestIfconfigParserFreeBSD(TestCase): ifs = Ifconfig().parse(self.ifs_txt) assert "txcsum" in ifs["vtnet0"].options + def test_duplicate_mac(self): + """ + assert that we can have duplicate macs, and that it's not an accident + """ + self.ifs_txt = readResource( + "netinfo/freebsd-duplicate-macs-ifconfig-output" + ) + ifc = Ifconfig() + ifc.parse(self.ifs_txt) + ifs_by_mac = ifc.ifs_by_mac() + assert len(ifs_by_mac["00:0d:3a:54:ad:1e"]) == 2 + assert ( + ifs_by_mac["00:0d:3a:54:ad:1e"][0].name + != ifs_by_mac["00:0d:3a:54:ad:1e"][1].name + ) + class TestIfconfigParserOpenBSD(TestCase): def setUp(self): -- cgit v1.2.1 From efe0201932e5a6ffaa20f489016b0cf0342d646f Mon Sep 17 00:00:00 2001 From: Chris Patterson Date: Tue, 3 Jan 2023 16:22:56 -0500 Subject: sources/azure: fix device driver matching for net config (#1914) The ordering of NICs provided by IMDS may not match the order enumerated by kernel. As such, we do not have any guarantee that the nic we're checking the driver for is the nic we think it is. Instead of making any assumptions about how the nics are named, check all interfaces by mac address. If there is an interface using "hv_netvsc", match against that. If there is only one interface driver that is not blacklisted, use that (in case it is not "hv_netvsc"), but log a debug event. 
If there are multiple hits, don't match against any of the names and report a warning. Signed-off-by: Chris Patterson --- cloudinit/sources/DataSourceAzure.py | 35 +++++- tests/unittests/sources/test_azure.py | 204 ++++++++++++++++++++++------------ 2 files changed, 164 insertions(+), 75 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index db9234db..c65e17a7 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -22,7 +22,6 @@ from cloudinit import dmi from cloudinit import log as logging from cloudinit import net, sources, ssh_util, subp, util from cloudinit.event import EventScope, EventType -from cloudinit.net import device_driver from cloudinit.net.dhcp import ( NoDHCPLeaseError, NoDHCPLeaseInterfaceError, @@ -208,6 +207,33 @@ def get_hv_netvsc_macs_normalized() -> List[str]: ] +@azure_ds_telemetry_reporter +def determine_device_driver_for_mac(mac: str) -> Optional[str]: + """Determine the device driver to match on, if any.""" + drivers = [ + i[2] + for i in net.get_interfaces(blacklist_drivers=BLACKLIST_DRIVERS) + if mac == normalize_mac_address(i[1]) + ] + if "hv_netvsc" in drivers: + return "hv_netvsc" + + if len(drivers) == 1: + report_diagnostic_event( + "Assuming driver for interface with mac=%s drivers=%r" + % (mac, drivers), + logger_func=LOG.debug, + ) + return drivers[0] + + report_diagnostic_event( + "Unable to specify driver for interface with mac=%s drivers=%r" + % (mac, drivers), + logger_func=LOG.warning, + ) + return None + + def execute_or_debug(cmd, fail_ret=None) -> str: try: return subp.subp(cmd).stdout # pyright: ignore @@ -2046,11 +2072,8 @@ def generate_network_config_from_instance_network_metadata( dev_config.update( {"match": {"macaddress": mac.lower()}, "set-name": nicname} ) - # With netvsc, we can get two interfaces that - # share the same MAC, so we need to make sure - # our match condition also contains the driver - driver = 
device_driver(nicname) - if driver and driver == "hv_netvsc": + driver = determine_device_driver_for_mac(mac) + if driver: dev_config["match"]["driver"] = driver netconfig["ethernets"][nicname] = dev_config continue diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py index 24fa061c..da2612e8 100644 --- a/tests/unittests/sources/test_azure.py +++ b/tests/unittests/sources/test_azure.py @@ -92,16 +92,6 @@ def mock_chassis_asset_tag(): yield m -@pytest.fixture -def mock_device_driver(): - with mock.patch( - MOCKPATH + "device_driver", - autospec=True, - return_value=None, - ) as m: - yield m - - @pytest.fixture def mock_generate_fallback_config(): with mock.patch( @@ -173,9 +163,19 @@ def mock_net_dhcp_EphemeralIPv4Network(): yield m -@pytest.fixture +@pytest.fixture(autouse=True) def mock_get_interfaces(): - with mock.patch(MOCKPATH + "net.get_interfaces", return_value=[]) as m: + with mock.patch( + MOCKPATH + "net.get_interfaces", + return_value=[ + ("dummy0", "9e:65:d6:19:19:01", None, None), + ("enP3", "00:11:22:33:44:02", "unknown_accel", "0x3"), + ("eth0", "00:11:22:33:44:00", "hv_netvsc", "0x3"), + ("eth2", "00:11:22:33:44:01", "unknown", "0x3"), + ("eth3", "00:11:22:33:44:02", "unknown_with_unknown_vf", "0x3"), + ("lo", "00:00:00:00:00:00", None, None), + ], + ) as m: yield m @@ -506,6 +506,116 @@ class TestGenerateNetworkConfig: "version": 2, }, ), + ( + "hv_netvsc driver", + { + "interface": [ + { + "macAddress": "001122334400", + "ipv6": {"ipAddress": []}, + "ipv4": { + "subnet": [ + {"prefix": "24", "address": "10.0.0.0"} + ], + "ipAddress": [ + { + "privateIpAddress": "10.0.0.4", + "publicIpAddress": "104.46.124.81", + } + ], + }, + } + ] + }, + { + "ethernets": { + "eth0": { + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 100}, + "dhcp6": False, + "match": { + "macaddress": "00:11:22:33:44:00", + "driver": "hv_netvsc", + }, + "set-name": "eth0", + } + }, + "version": 2, + }, + ), + ( + "unknown", + { + 
"interface": [ + { + "macAddress": "001122334401", + "ipv6": {"ipAddress": []}, + "ipv4": { + "subnet": [ + {"prefix": "24", "address": "10.0.0.0"} + ], + "ipAddress": [ + { + "privateIpAddress": "10.0.0.4", + "publicIpAddress": "104.46.124.81", + } + ], + }, + } + ] + }, + { + "ethernets": { + "eth0": { + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 100}, + "dhcp6": False, + "match": { + "macaddress": "00:11:22:33:44:01", + "driver": "unknown", + }, + "set-name": "eth0", + } + }, + "version": 2, + }, + ), + ( + "unknown with unknown matching VF", + { + "interface": [ + { + "macAddress": "001122334402", + "ipv6": {"ipAddress": []}, + "ipv4": { + "subnet": [ + {"prefix": "24", "address": "10.0.0.0"} + ], + "ipAddress": [ + { + "privateIpAddress": "10.0.0.4", + "publicIpAddress": "104.46.124.81", + } + ], + }, + } + ] + }, + { + "ethernets": { + "eth0": { + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 100}, + "dhcp6": False, + "match": { + "macaddress": "00:11:22:33:44:02", + }, + "set-name": "eth0", + } + }, + "version": 2, + }, + ), ( "multiple interfaces with increasing route metric", { @@ -648,7 +758,7 @@ class TestGenerateNetworkConfig: ], ) def test_parsing_scenarios( - self, label, mock_device_driver, metadata, expected + self, label, mock_get_interfaces, metadata, expected ): assert ( dsaz.generate_network_config_from_instance_network_metadata( @@ -657,27 +767,6 @@ class TestGenerateNetworkConfig: == expected ) - def test_match_hv_netvsc(self, mock_device_driver): - mock_device_driver.return_value = "hv_netvsc" - - assert dsaz.generate_network_config_from_instance_network_metadata( - NETWORK_METADATA["network"] - ) == { - "ethernets": { - "eth0": { - "dhcp4": True, - "dhcp4-overrides": {"route-metric": 100}, - "dhcp6": False, - "match": { - "macaddress": "00:0d:3a:04:75:98", - "driver": "hv_netvsc", - }, - "set-name": "eth0", - } - }, - "version": 2, - } - class TestNetworkConfig: fallback_config = { @@ -693,7 +782,9 @@ class 
TestNetworkConfig: ], } - def test_single_ipv4_nic_configuration(self, azure_ds, mock_device_driver): + def test_single_ipv4_nic_configuration( + self, azure_ds, mock_get_interfaces + ): """Network config emits dhcp on single nic with ipv4""" expected = { "ethernets": { @@ -712,7 +803,7 @@ class TestNetworkConfig: assert azure_ds.network_config == expected def test_uses_fallback_cfg_when_apply_network_config_is_false( - self, azure_ds, mock_device_driver, mock_generate_fallback_config + self, azure_ds, mock_generate_fallback_config ): azure_ds.ds_cfg["apply_network_config"] = False azure_ds._metadata_imds = NETWORK_METADATA @@ -721,7 +812,7 @@ class TestNetworkConfig: assert azure_ds.network_config == self.fallback_config def test_uses_fallback_cfg_when_imds_metadata_unset( - self, azure_ds, mock_device_driver, mock_generate_fallback_config + self, azure_ds, mock_generate_fallback_config ): azure_ds._metadata_imds = UNSET mock_generate_fallback_config.return_value = self.fallback_config @@ -729,7 +820,7 @@ class TestNetworkConfig: assert azure_ds.network_config == self.fallback_config def test_uses_fallback_cfg_when_no_network_metadata( - self, azure_ds, mock_device_driver, mock_generate_fallback_config + self, azure_ds, mock_generate_fallback_config ): """Network config generates fallback network config when the IMDS instance metadata is corrupted/invalid, such as when @@ -745,7 +836,7 @@ class TestNetworkConfig: assert azure_ds.network_config == self.fallback_config def test_uses_fallback_cfg_when_no_interface_metadata( - self, azure_ds, mock_device_driver, mock_generate_fallback_config + self, azure_ds, mock_generate_fallback_config ): """Network config generates fallback network config when the IMDS instance metadata is corrupted/invalid, such as when @@ -1069,13 +1160,6 @@ scbus-1 on xpt0 bus 0 self.m_get_metadata_from_fabric = mock.MagicMock(return_value=[]) self.m_report_failure_to_fabric = mock.MagicMock(autospec=True) - self.m_get_interfaces = 
mock.MagicMock( - return_value=[ - ("dummy0", "9e:65:d6:19:19:01", None, None), - ("eth0", "00:15:5d:69:63:ba", "hv_netvsc", "0x3"), - ("lo", "00:00:00:00:00:00", None, None), - ] - ) self.m_list_possible_azure_ds = mock.MagicMock( side_effect=_load_possible_azure_ds ) @@ -1119,11 +1203,6 @@ scbus-1 on xpt0 bus 0 "get_interface_mac", mock.MagicMock(return_value="00:15:5d:69:63:ba"), ), - ( - dsaz.net, - "get_interfaces", - self.m_get_interfaces, - ), (dsaz.subp, "which", lambda x: True), ( dsaz.dmi, @@ -1537,10 +1616,7 @@ scbus-1 on xpt0 bus 0 self.assertTrue(os.path.isdir(self.waagent_d)) self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700) - @mock.patch( - "cloudinit.sources.DataSourceAzure.device_driver", return_value=None - ) - def test_network_config_set_from_imds(self, m_driver): + def test_network_config_set_from_imds(self): """Datasource.network_config returns IMDS network data.""" sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} data = { @@ -1563,12 +1639,7 @@ scbus-1 on xpt0 bus 0 dsrc.get_data() self.assertEqual(expected_network_config, dsrc.network_config) - @mock.patch( - "cloudinit.sources.DataSourceAzure.device_driver", return_value=None - ) - def test_network_config_set_from_imds_route_metric_for_secondary_nic( - self, m_driver - ): + def test_network_config_set_from_imds_route_metric_for_secondary_nic(self): """Datasource.network_config adds route-metric to secondary nics.""" sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} data = { @@ -1614,12 +1685,7 @@ scbus-1 on xpt0 bus 0 dsrc.get_data() self.assertEqual(expected_network_config, dsrc.network_config) - @mock.patch( - "cloudinit.sources.DataSourceAzure.device_driver", return_value=None - ) - def test_network_config_set_from_imds_for_secondary_nic_no_ip( - self, m_driver - ): + def test_network_config_set_from_imds_for_secondary_nic_no_ip(self): """If an IP address is empty then there should no config for it.""" sys_cfg = {"datasource": 
{"Azure": {"apply_network_config": True}}} data = { @@ -2157,7 +2223,7 @@ scbus-1 on xpt0 bus 0 [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list ) - @mock.patch(MOCKPATH + "net.get_interfaces", autospec=True) + @mock.patch(MOCKPATH + "net.get_interfaces") def test_blacklist_through_distro(self, m_net_get_interfaces): """Verify Azure DS updates blacklist drivers in the distro's networking object.""" @@ -2175,7 +2241,7 @@ scbus-1 on xpt0 bus 0 ) distro.networking.get_interfaces_by_mac() - self.m_get_interfaces.assert_called_with( + m_net_get_interfaces.assert_called_with( blacklist_drivers=dsaz.BLACKLIST_DRIVERS ) -- cgit v1.2.1 From 5a557755345f7f6d1cdc6d0c6cd4ef4be501ec4f Mon Sep 17 00:00:00 2001 From: eb3095 <45504889+eb3095@users.noreply.github.com> Date: Wed, 4 Jan 2023 05:20:10 -0500 Subject: Fix typo with package_update/package_upgrade (#1927) test/doc: Fix typo with package_update/package_upgrade There's a typo in the docs that can lead to confusion. packages_update > package_update packages_upgrade > package_upgrade --- doc/examples/cloud-config-ansible-controller.txt | 4 ++-- doc/examples/cloud-config-ansible-pull.txt | 4 ++-- tests/integration_tests/modules/test_ansible.py | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/examples/cloud-config-ansible-controller.txt b/doc/examples/cloud-config-ansible-controller.txt index 389f8f88..da2f58f0 100644 --- a/doc/examples/cloud-config-ansible-controller.txt +++ b/doc/examples/cloud-config-ansible-controller.txt @@ -4,8 +4,8 @@ # This example installs a playbook repository from a remote private repository # and then runs two of the plays. 
-packages_update: true -packages_upgrade: true +package_update: true +package_upgrade: true packages: - git - python3-pip diff --git a/doc/examples/cloud-config-ansible-pull.txt b/doc/examples/cloud-config-ansible-pull.txt index 62acc5a9..73985772 100644 --- a/doc/examples/cloud-config-ansible-pull.txt +++ b/doc/examples/cloud-config-ansible-pull.txt @@ -1,6 +1,6 @@ #cloud-config -packages_update: true -packages_upgrade: true +package_update: true +package_upgrade: true # if you're already installing other packages, you may # wish to manually install ansible to avoid multiple calls diff --git a/tests/integration_tests/modules/test_ansible.py b/tests/integration_tests/modules/test_ansible.py index d781dabf..587385d4 100644 --- a/tests/integration_tests/modules/test_ansible.py +++ b/tests/integration_tests/modules/test_ansible.py @@ -12,8 +12,8 @@ REPO_D = "/root/playbooks" USER_DATA = """\ #cloud-config version: v1 -packages_update: true -packages_upgrade: true +package_update: true +package_upgrade: true packages: - git - python3-pip @@ -114,8 +114,8 @@ ANSIBLE_CONTROL = """\ # This example installs a playbook repository from a remote private repository # and then runs two of the plays. -packages_update: true -packages_upgrade: true +package_update: true +package_upgrade: true packages: - git - python3-pip -- cgit v1.2.1 From 65557c34bb1aa31a753f19db6a57a3e6ef1c2e39 Mon Sep 17 00:00:00 2001 From: dermotbradley Date: Thu, 5 Jan 2023 16:54:51 +0000 Subject: cc_disk_setup.py: fix MBR single partition creation (#1932) Fixes the creation of single partitions on MBR devices. 
Currently this fails with the following debug output: cc_disk_setup.py[DEBUG]: Calculating partition layout cc_disk_setup.py[DEBUG]: Layout is: 0, cc_disk_setup.py[DEBUG]: Creating partition table on /dev/sdb subp.py[DEBUG]: Running command ['/sbin/sfdisk', '--Linux', '--unit=S', '--force', '/dev/sdb'] with allowed return codes [0] (shell=False, capture=True) util.py[DEBUG]: Creating partition on /dev/sdb took 0.237 seconds util.py[WARNING]: Failed partitioning operation Failed to partition device /dev/sdb Unexpected error while running command. Command: ['/sbin/sfdisk/', '--Linux', '--unit=S', '--force', '/dev/sdb'] Exit code: 1 Reason: - Stdout: Checking that no-one is using this disk right now ... OK Disk /dev/sdb: 16 MiB, 16777216 bytes, 32768 sectors Disk model: HARDDISK Units: sectors of 1 * 512 = 512 bytes Sector size (logical/physical): 512 bytes / 512 bytes I/O size (minimum/optimal): 512 bytes / 512 bytes >>> Created a new DOS disklabel with disk identifier 0xb3604c9a. /dev/sdb1: Leaving. Stderr: sfdisk: --Linux option is unnecessary and deprecated Start sector 0 out of range. Failed to add #1 partition: Result not representable util.py[DEBUG]: Failed partitioning operation On a BIOS/MBR partitioned device the 1st partition cannot start at sector 0 as this is reserved for the MBR. Documentation clarifications/corrections and additional examples added. Also remove "--Linux" and "--unit=S" options from sfdisk calls, these options have been deprecated since October 2014. Note: This is not a change of behavior because the change provoking the error was introduced in util-linux 2.26 in Xenial. Thus, every supported cloud-init version fails. 
LP: #1851438 --- cloudinit/config/cc_disk_setup.py | 16 ++++++-- .../config/schemas/schema-cloud-config-v1.json | 4 +- doc/examples/cloud-config-disk-setup.txt | 48 +++++++++++----------- tests/unittests/config/test_cc_disk_setup.py | 2 +- 4 files changed, 39 insertions(+), 31 deletions(-) diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 71d52d3d..5ec5c793 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -80,6 +80,10 @@ meta: MetaSchema = { table_type: gpt layout: [[100, 82]] overwrite: true + /dev/sdd: + table_type: mbr + layout: true + overwrite: true fs_setup: - label: fs1 filesystem: ext4 @@ -91,10 +95,14 @@ meta: MetaSchema = { - label: swap device: swap_disk.1 filesystem: swap + - label: fs3 + device: /dev/sdd1 + filesystem: ext4 mounts: - ["my_alias.1", "/mnt1"] - ["my_alias.2", "/mnt2"] - ["swap_disk.1", "none", "swap", "sw", "0", "0"] + - ["/dev/sdd1", "/mnt3"] """ ) ], @@ -605,8 +613,8 @@ def get_partition_mbr_layout(size, layout): """ if not isinstance(layout, list) and isinstance(layout, bool): - # Create a single partition - return "0," + # Create a single partition, default to Linux + return ",,83" if (len(layout) == 0 and isinstance(layout, list)) or not isinstance( layout, list @@ -741,7 +749,7 @@ def exec_mkpart_mbr(device, layout): types, i.e. 
gpt """ # Create the partitions - prt_cmd = [SFDISK_CMD, "--Linux", "--unit=S", "--force", device] + prt_cmd = [SFDISK_CMD, "--force", device] try: subp.subp(prt_cmd, data="%s\n" % layout) except Exception as e: @@ -968,7 +976,7 @@ def mkfs(fs_cfg): odevice = device LOG.debug("Identifying device to create %s filesytem on", label) - # any mean pick the first match on the device with matching fs_type + # 'any' means pick the first match on the device with matching fs_type label_match = True if partition.lower() == "any": label_match = False diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json index 7ff80ce3..42745b28 100644 --- a/cloudinit/config/schemas/schema-cloud-config-v1.json +++ b/cloudinit/config/schemas/schema-cloud-config-v1.json @@ -1180,7 +1180,7 @@ }, "device": { "type": "string", - "description": "Specified either as a path or as an alias in the format ``.`` where ```` denotes the partition number on the device. If specifying device using the ``.`` format, the value of ``partition`` will be overwritten." + "description": "Specified either as a path or as an alias in the format ``.`` where ```` denotes the partition number on the device. If specifying device using the ``.`` format, the value of ``partition`` will be overwritten." }, "partition": { "type": [ @@ -1197,7 +1197,7 @@ ] } ], - "description": "The partition can be specified by setting ``partition`` to the desired partition number. The ``partition`` option may also be set to ``auto``, in which this module will search for the existence of a filesystem matching the ``label``, ``type`` and ``device`` of the ``fs_setup`` entry and will skip creating the filesystem if one is found. The ``partition`` option may also be set to ``any``, in which case any file system that matches ``type`` and ``device`` will cause this module to skip filesystem creation for the ``fs_setup`` entry, regardless of ``label`` matching or not. 
To write a filesystem directly to a device, use ``partition: none``. ``partition: none`` will **always** write the filesystem, even when the ``label`` and ``filesystem`` are matched, and ``overwrite`` is ``false``." + "description": "The partition can be specified by setting ``partition`` to the desired partition number. The ``partition`` option may also be set to ``auto``, in which this module will search for the existence of a filesystem matching the ``label``, ``filesystem`` and ``device`` of the ``fs_setup`` entry and will skip creating the filesystem if one is found. The ``partition`` option may also be set to ``any``, in which case any filesystem that matches ``filesystem`` and ``device`` will cause this module to skip filesystem creation for the ``fs_setup`` entry, regardless of ``label`` matching or not. To write a filesystem directly to a device, use ``partition: none``. ``partition: none`` will **always** write the filesystem, even when the ``label`` and ``filesystem`` are matched, and ``overwrite`` is ``false``." }, "overwrite": { "type": "boolean", diff --git a/doc/examples/cloud-config-disk-setup.txt b/doc/examples/cloud-config-disk-setup.txt index cdd176d3..3c8fc36c 100644 --- a/doc/examples/cloud-config-disk-setup.txt +++ b/doc/examples/cloud-config-disk-setup.txt @@ -1,5 +1,5 @@ #cloud-config -# Cloud-init supports the creation of simple partition tables and file systems +# Cloud-init supports the creation of simple partition tables and filesystems # on devices. # Default disk definitions for AWS @@ -147,13 +147,13 @@ disk_setup: # If layout is set to "true" and overwrite is set to "false", # it will skip partitioning the device without a failure. # -# overwrite=: This describes whether to ride with saftey's on and +# overwrite=: This describes whether to ride with safetys on and # everything holstered. # # 'false' is the default, which means that: # 1. The device will be checked for a partition table -# 2. 
The device will be checked for a file system -# 3. If either a partition of file system is found, then +# 2. The device will be checked for a filesystem +# 3. If either a partition of filesystem is found, then # the operation will be _skipped_. # # 'true' is cowboy mode. There are no checks and things are @@ -161,10 +161,10 @@ disk_setup: # really, really don't want to do. # # -# fs_setup: Setup the file system -# ------------------------------- +# fs_setup: Setup the filesystem +# ------------------------------ # -# fs_setup describes the how the file systems are supposed to look. +# fs_setup describes the how the filesystems are supposed to look. fs_setup: - label: ephemeral0 @@ -189,10 +189,10 @@ fs_setup: # replace_fs: # # Where: -#