summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJames Falcon <james.falcon@canonical.com>2023-03-27 10:36:03 -0500
committerJames Falcon <james.falcon@canonical.com>2023-03-27 10:36:03 -0500
commite5639bce8eecd7fd9bf7f107aeb34a2d974638dd (patch)
tree557c7bfdbb4814e0943de3e89187548c14e1f6ce
parent4dd8cda76eac164790a678d5794aa67326d90849 (diff)
parent4b6e4e13159b8db20246818bdce5b893f348ad99 (diff)
downloadcloud-init-git-e5639bce8eecd7fd9bf7f107aeb34a2d974638dd.tar.gz
merge from upstream/main at 23.1-36-g4b6e4e13
-rw-r--r--.github/workflows/integration.yml2
-rw-r--r--ChangeLog5
-rw-r--r--bash_completion/cloud-init5
-rw-r--r--cloudinit/analyze/show.py15
-rw-r--r--cloudinit/apport.py24
-rwxr-xr-xcloudinit/cmd/devel/hotplug_hook.py2
-rwxr-xr-xcloudinit/cmd/main.py18
-rw-r--r--cloudinit/config/cc_ansible.py7
-rw-r--r--cloudinit/config/cc_apk_configure.py5
-rw-r--r--cloudinit/config/cc_apt_configure.py37
-rw-r--r--cloudinit/config/cc_apt_pipelining.py14
-rw-r--r--cloudinit/config/cc_bootcmd.py14
-rw-r--r--cloudinit/config/cc_byobu.py16
-rw-r--r--cloudinit/config/cc_ca_certs.py77
-rw-r--r--cloudinit/config/cc_chef.py40
-rw-r--r--cloudinit/config/cc_disable_ec2_metadata.py12
-rw-r--r--cloudinit/config/cc_disk_setup.py55
-rw-r--r--cloudinit/config/cc_fan.py5
-rw-r--r--cloudinit/config/cc_final_message.py15
-rw-r--r--cloudinit/config/cc_growpart.py34
-rw-r--r--cloudinit/config/cc_grub_dpkg.py128
-rw-r--r--cloudinit/config/cc_install_hotplug.py15
-rw-r--r--cloudinit/config/cc_keyboard.py5
-rw-r--r--cloudinit/config/cc_keys_to_console.py14
-rw-r--r--cloudinit/config/cc_landscape.py9
-rw-r--r--cloudinit/config/cc_locale.py11
-rw-r--r--cloudinit/config/cc_lxd.py27
-rw-r--r--cloudinit/config/cc_mcollective.py8
-rw-r--r--cloudinit/config/cc_migrator.py17
-rw-r--r--cloudinit/config/cc_mounts.py51
-rw-r--r--cloudinit/config/cc_ntp.py5
-rw-r--r--cloudinit/config/cc_package_update_upgrade_install.py26
-rw-r--r--cloudinit/config/cc_phone_home.py20
-rw-r--r--cloudinit/config/cc_power_state_change.py47
-rw-r--r--cloudinit/config/cc_puppet.py37
-rw-r--r--cloudinit/config/cc_refresh_rmc_and_interface.py22
-rw-r--r--cloudinit/config/cc_reset_rmc.py7
-rw-r--r--cloudinit/config/cc_resizefs.py67
-rw-r--r--cloudinit/config/cc_resolv_conf.py13
-rw-r--r--cloudinit/config/cc_rh_subscription.py9
-rw-r--r--cloudinit/config/cc_rightscale_userdata.py21
-rw-r--r--cloudinit/config/cc_rsyslog.py21
-rw-r--r--cloudinit/config/cc_runcmd.py10
-rw-r--r--cloudinit/config/cc_salt_minion.py9
-rw-r--r--cloudinit/config/cc_scripts_per_boot.py9
-rw-r--r--cloudinit/config/cc_scripts_per_instance.py10
-rw-r--r--cloudinit/config/cc_scripts_per_once.py9
-rw-r--r--cloudinit/config/cc_scripts_user.py9
-rw-r--r--cloudinit/config/cc_scripts_vendor.py9
-rw-r--r--cloudinit/config/cc_seed_random.py9
-rw-r--r--cloudinit/config/cc_set_hostname.py17
-rw-r--r--cloudinit/config/cc_set_passwords.py47
-rw-r--r--cloudinit/config/cc_snap.py5
-rw-r--r--cloudinit/config/cc_spacewalk.py24
-rw-r--r--cloudinit/config/cc_ssh.py27
-rw-r--r--cloudinit/config/cc_ssh_authkey_fingerprints.py11
-rw-r--r--cloudinit/config/cc_ssh_import_id.py29
-rw-r--r--cloudinit/config/cc_timezone.py9
-rw-r--r--cloudinit/config/cc_ubuntu_advantage.py21
-rw-r--r--cloudinit/config/cc_ubuntu_autoinstall.py5
-rw-r--r--cloudinit/config/cc_ubuntu_drivers.py9
-rw-r--r--cloudinit/config/cc_update_etc_hosts.py22
-rw-r--r--cloudinit/config/cc_update_hostname.py15
-rw-r--r--cloudinit/config/cc_users_groups.py5
-rw-r--r--cloudinit/config/cc_wireguard.py5
-rw-r--r--cloudinit/config/cc_write_files.py7
-rw-r--r--cloudinit/config/cc_write_files_deferred.py9
-rw-r--r--cloudinit/config/cc_yum_add_repo.py17
-rw-r--r--cloudinit/config/cc_zypper_add_repo.py5
-rw-r--r--cloudinit/config/modules.py22
-rw-r--r--cloudinit/config/schema.py17
-rw-r--r--cloudinit/config/schemas/schema-cloud-config-v1.json7
-rw-r--r--cloudinit/dhclient_hook.py90
-rw-r--r--cloudinit/distros/__init__.py53
-rw-r--r--cloudinit/distros/freebsd.py39
-rw-r--r--cloudinit/distros/netbsd.py7
-rw-r--r--cloudinit/distros/parsers/ifconfig.py7
-rw-r--r--cloudinit/distros/rhel.py7
-rw-r--r--cloudinit/distros/ug_util.py15
-rw-r--r--cloudinit/helpers.py11
-rw-r--r--cloudinit/log.py11
-rw-r--r--cloudinit/net/__init__.py8
-rw-r--r--cloudinit/net/activators.py38
-rw-r--r--cloudinit/net/bsd.py3
-rw-r--r--cloudinit/net/dhcp.py27
-rw-r--r--cloudinit/net/eni.py18
-rw-r--r--cloudinit/net/ephemeral.py28
-rw-r--r--cloudinit/net/netplan.py20
-rw-r--r--cloudinit/net/network_state.py16
-rw-r--r--cloudinit/net/networkd.py16
-rw-r--r--cloudinit/net/renderer.py21
-rw-r--r--cloudinit/net/sysconfig.py7
-rw-r--r--cloudinit/netinfo.py8
-rw-r--r--cloudinit/sources/DataSourceAzure.py73
-rw-r--r--cloudinit/sources/DataSourceCloudSigma.py5
-rw-r--r--cloudinit/sources/DataSourceDigitalOcean.py2
-rw-r--r--cloudinit/sources/DataSourceEc2.py1
-rw-r--r--cloudinit/sources/DataSourceExoscale.py6
-rw-r--r--cloudinit/sources/DataSourceGCE.py1
-rw-r--r--cloudinit/sources/DataSourceHetzner.py3
-rw-r--r--cloudinit/sources/DataSourceLXD.py7
-rw-r--r--cloudinit/sources/DataSourceNWCS.py24
-rw-r--r--cloudinit/sources/DataSourceOpenNebula.py6
-rw-r--r--cloudinit/sources/DataSourceOpenStack.py44
-rw-r--r--cloudinit/sources/DataSourceOracle.py8
-rw-r--r--cloudinit/sources/DataSourceScaleway.py51
-rw-r--r--cloudinit/sources/DataSourceSmartOS.py3
-rw-r--r--cloudinit/sources/DataSourceUpCloud.py8
-rw-r--r--cloudinit/sources/DataSourceVultr.py8
-rw-r--r--cloudinit/sources/__init__.py59
-rw-r--r--cloudinit/sources/azure/imds.py131
-rw-r--r--cloudinit/sources/helpers/cloudsigma.py4
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config.py5
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_file.py36
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_namespace.py15
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_nic.py12
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_util.py25
-rw-r--r--cloudinit/sources/helpers/vmware/imc/ipv4_mode.py36
-rw-r--r--cloudinit/sources/helpers/vultr.py4
-rw-r--r--cloudinit/stages.py12
-rw-r--r--cloudinit/templater.py15
-rw-r--r--cloudinit/user_data.py2
-rw-r--r--cloudinit/util.py110
-rw-r--r--cloudinit/version.py2
-rw-r--r--doc/examples/cloud-config-datasources.txt5
-rw-r--r--doc/man/cloud-init.14
-rw-r--r--doc/rtd/conf.py42
-rw-r--r--doc/rtd/development/module_creation.rst10
-rw-r--r--doc/rtd/explanation/instancedata.rst4
-rw-r--r--doc/rtd/reference/cli.rst5
-rw-r--r--doc/rtd/reference/faq.rst15
-rw-r--r--doc/rtd/static/css/custom.css248
-rwxr-xr-xpackages/bddeb248
-rw-r--r--packages/debian/compat1
-rwxr-xr-xpackages/debian/rules28
-rw-r--r--packages/redhat/cloud-init.spec.in3
-rw-r--r--packages/suse/cloud-init.spec.in2
-rw-r--r--setup.py5
-rw-r--r--tests/integration_tests/clouds.py2
-rw-r--r--tests/integration_tests/cmd/test_schema.py2
-rw-r--r--tests/integration_tests/conftest.py2
-rw-r--r--tests/integration_tests/datasources/test_detect_openstack.py43
-rw-r--r--tests/integration_tests/datasources/test_oci_networking.py39
-rw-r--r--tests/integration_tests/instances.py4
-rw-r--r--tests/integration_tests/modules/test_combined.py18
-rw-r--r--tests/unittests/cmd/test_main.py3
-rw-r--r--tests/unittests/config/test_apt_configure_sources_list_v1.py21
-rw-r--r--tests/unittests/config/test_apt_configure_sources_list_v3.py11
-rw-r--r--tests/unittests/config/test_apt_source_v1.py18
-rw-r--r--tests/unittests/config/test_apt_source_v3.py5
-rw-r--r--tests/unittests/config/test_cc_ansible.py8
-rw-r--r--tests/unittests/config/test_cc_apk_configure.py39
-rw-r--r--tests/unittests/config/test_cc_apt_pipelining.py4
-rw-r--r--tests/unittests/config/test_cc_bootcmd.py13
-rw-r--r--tests/unittests/config/test_cc_ca_certs.py55
-rw-r--r--tests/unittests/config/test_cc_chef.py15
-rw-r--r--tests/unittests/config/test_cc_disable_ec2_metadata.py9
-rw-r--r--tests/unittests/config/test_cc_disk_setup.py2
-rw-r--r--tests/unittests/config/test_cc_final_message.py3
-rw-r--r--tests/unittests/config/test_cc_growpart.py10
-rw-r--r--tests/unittests/config/test_cc_grub_dpkg.py123
-rw-r--r--tests/unittests/config/test_cc_install_hotplug.py10
-rw-r--r--tests/unittests/config/test_cc_keys_to_console.py2
-rw-r--r--tests/unittests/config/test_cc_landscape.py8
-rw-r--r--tests/unittests/config/test_cc_locale.py10
-rw-r--r--tests/unittests/config/test_cc_lxd.py11
-rw-r--r--tests/unittests/config/test_cc_mcollective.py2
-rw-r--r--tests/unittests/config/test_cc_mounts.py63
-rw-r--r--tests/unittests/config/test_cc_ntp.py18
-rw-r--r--tests/unittests/config/test_cc_phone_home.py4
-rw-r--r--tests/unittests/config/test_cc_power_state_change.py10
-rw-r--r--tests/unittests/config/test_cc_puppet.py65
-rw-r--r--tests/unittests/config/test_cc_refresh_rmc_and_interface.py11
-rw-r--r--tests/unittests/config/test_cc_resizefs.py46
-rw-r--r--tests/unittests/config/test_cc_resolv_conf.py2
-rw-r--r--tests/unittests/config/test_cc_rh_subscription.py24
-rw-r--r--tests/unittests/config/test_cc_runcmd.py8
-rw-r--r--tests/unittests/config/test_cc_seed_random.py23
-rw-r--r--tests/unittests/config/test_cc_set_hostname.py28
-rw-r--r--tests/unittests/config/test_cc_set_passwords.py15
-rw-r--r--tests/unittests/config/test_cc_snap.py2
-rw-r--r--tests/unittests/config/test_cc_ssh.py12
-rw-r--r--tests/unittests/config/test_cc_ssh_import_id.py2
-rw-r--r--tests/unittests/config/test_cc_timezone.py2
-rw-r--r--tests/unittests/config/test_cc_ubuntu_advantage.py19
-rw-r--r--tests/unittests/config/test_cc_ubuntu_autoinstall.py2
-rw-r--r--tests/unittests/config/test_cc_ubuntu_drivers.py36
-rw-r--r--tests/unittests/config/test_cc_update_etc_hosts.py4
-rw-r--r--tests/unittests/config/test_cc_users_groups.py18
-rw-r--r--tests/unittests/config/test_cc_wireguard.py4
-rw-r--r--tests/unittests/config/test_cc_write_files.py4
-rw-r--r--tests/unittests/config/test_cc_write_files_deferred.py2
-rw-r--r--tests/unittests/config/test_cc_yum_add_repo.py6
-rw-r--r--tests/unittests/config/test_cc_zypper_add_repo.py2
-rw-r--r--tests/unittests/config/test_modules.py50
-rw-r--r--tests/unittests/config/test_schema.py6
-rw-r--r--tests/unittests/distros/test__init__.py96
-rw-r--r--tests/unittests/distros/test_create_users.py22
-rw-r--r--tests/unittests/distros/test_netconfig.py20
-rw-r--r--tests/unittests/helpers.py2
-rw-r--r--tests/unittests/net/test_dhcp.py103
-rw-r--r--tests/unittests/net/test_ephemeral.py10
-rw-r--r--tests/unittests/net/test_network_state.py7
-rw-r--r--tests/unittests/sources/azure/test_imds.py282
-rw-r--r--tests/unittests/sources/conftest.py9
-rw-r--r--tests/unittests/sources/test___init__.py40
-rw-r--r--tests/unittests/sources/test_azure.py177
-rw-r--r--tests/unittests/sources/test_cloudsigma.py7
-rw-r--r--tests/unittests/sources/test_ec2.py2
-rw-r--r--tests/unittests/sources/test_exoscale.py16
-rw-r--r--tests/unittests/sources/test_hetzner.py1
-rw-r--r--tests/unittests/sources/test_init.py5
-rw-r--r--tests/unittests/sources/test_nwcs.py14
-rw-r--r--tests/unittests/sources/test_opennebula.py20
-rw-r--r--tests/unittests/sources/test_openstack.py133
-rw-r--r--tests/unittests/sources/test_oracle.py41
-rw-r--r--tests/unittests/sources/test_scaleway.py31
-rw-r--r--tests/unittests/sources/test_upcloud.py2
-rw-r--r--tests/unittests/sources/test_vmware.py6
-rw-r--r--tests/unittests/sources/vmware/test_vmware_config_file.py11
-rw-r--r--tests/unittests/test_apport.py75
-rw-r--r--tests/unittests/test_cli.py22
-rw-r--r--tests/unittests/test_dhclient_hook.py112
-rw-r--r--tests/unittests/test_ds_identify.py7
-rw-r--r--tests/unittests/test_features.py2
-rw-r--r--tests/unittests/test_log.py30
-rw-r--r--tests/unittests/test_net.py43
-rw-r--r--tests/unittests/test_net_activators.py25
-rw-r--r--tests/unittests/test_netinfo.py4
-rw-r--r--tests/unittests/test_util.py17
-rw-r--r--tests/unittests/util.py5
-rw-r--r--tools/.github-cla-signers4
-rwxr-xr-xtools/ds-identify7
-rwxr-xr-xtools/hook-dhclient27
-rwxr-xr-xtools/hook-network-manager26
-rwxr-xr-xtools/hook-rhel.sh29
236 files changed, 2906 insertions, 2745 deletions
diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml
index 889897a3..c8b32d61 100644
--- a/.github/workflows/integration.yml
+++ b/.github/workflows/integration.yml
@@ -37,6 +37,8 @@ jobs:
ubuntu-dev-tools
sudo sbuild-adduser $USER
cp /usr/share/doc/sbuild/examples/example.sbuildrc /home/$USER/.sbuildrc
+ # Install all build and test dependencies
+ ./tools/read-dependencies -r requirements.txt -r test-requirements.txt -d ubuntu -s -i
- name: Build package
run: |
./packages/bddeb -S -d --release ${{ env.RELEASE }}
diff --git a/ChangeLog b/ChangeLog
index 1044b7d5..ea3ed8c2 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+23.1.1
+ - source: Force OpenStack when it is only option (#2045)
+ - sources/azure: fix regressions in IMDS behavior (#2041)
+ [Chris Patterson]
+
23.1
- Support transactional-updates for SUSE based distros (#1997)
[Robert Schweikert]
diff --git a/bash_completion/cloud-init b/bash_completion/cloud-init
index 579005d2..be783977 100644
--- a/bash_completion/cloud-init
+++ b/bash_completion/cloud-init
@@ -10,7 +10,7 @@ _cloudinit_complete()
cur_word="${COMP_WORDS[COMP_CWORD]}"
prev_word="${COMP_WORDS[COMP_CWORD-1]}"
- subcmds="analyze clean collect-logs devel dhclient-hook features init modules query schema single status"
+ subcmds="analyze clean collect-logs devel features init modules query schema single status"
base_params="--help --file --version --debug --force"
case ${COMP_CWORD} in
1)
@@ -30,9 +30,6 @@ _cloudinit_complete()
devel)
COMPREPLY=($(compgen -W "--help hotplug-hook net-convert" -- $cur_word))
;;
- dhclient-hook)
- COMPREPLY=($(compgen -W "--help up down" -- $cur_word))
- ;;
features)
COMPREPLY=($(compgen -W "--help" -- $cur_word))
;;
diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py
index 8ce649de..8d5866e3 100644
--- a/cloudinit/analyze/show.py
+++ b/cloudinit/analyze/show.py
@@ -4,7 +4,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import base64
import datetime
import json
import os
@@ -66,20 +65,6 @@ def format_record(msg, event):
return msg.format(**event)
-def dump_event_files(event):
- content = dict((k, v) for k, v in event.items() if k not in ["content"])
- files = content["files"]
- saved = []
- for f in files:
- fname = f["path"]
- fn_local = os.path.basename(fname)
- fcontent = base64.b64decode(f["content"]).decode("ascii")
- util.write_file(fn_local, fcontent)
- saved.append(fn_local)
-
- return saved
-
-
def event_name(event):
if event:
return event.get("name")
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index e42ecf8e..dead3059 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -4,6 +4,7 @@
"""Cloud-init apport interface"""
+import json
import os
from cloudinit.cmd.devel import read_cfg_paths
@@ -101,8 +102,29 @@ def attach_hwinfo(report, ui=None):
def attach_cloud_info(report, ui=None):
- """Prompt for cloud details if available."""
+ """Prompt for cloud details if instance-data unavailable.
+
+ When we have valid _get_instance_data, apport/generic-hooks/cloud_init.py
+ provides CloudName, CloudID, CloudPlatform and CloudSubPlatform.
+
+ Apport/generic-hooks are delivered by cloud-init's downstream branches
+ ubuntu/(devel|kinetic|jammy|focal|bionic) so they will not be represented
+ in upstream main.
+
+ In absence of viable instance-data.json format, prompt for the cloud below.
+ """
+
if ui:
+ paths = read_cfg_paths()
+ try:
+ with open(paths.get_runpath("instance_data")) as file:
+ instance_data = json.load(file)
+ assert instance_data.get("v1", {}).get("cloud_name")
+ return # Valid instance-data means generic-hooks will report
+ except (IOError, json.decoder.JSONDecodeError, AssertionError):
+ pass
+
+ # No valid /run/cloud/instance-data.json on system. Prompt for cloud.
prompt = "Is this machine running in a cloud environment?"
response = ui.yesno(prompt)
if response is None:
diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py
index 560857ef..78085735 100755
--- a/cloudinit/cmd/devel/hotplug_hook.py
+++ b/cloudinit/cmd/devel/hotplug_hook.py
@@ -168,7 +168,7 @@ def is_enabled(hotplug_init, subsystem):
try:
scope = SUBSYSTEM_PROPERTES_MAP[subsystem][1]
except KeyError as e:
- raise Exception(
+ raise RuntimeError(
"hotplug-hook: cannot handle events for subsystem: {}".format(
subsystem
)
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index f28fda15..70efa7b7 100755
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -47,7 +47,6 @@ from cloudinit.settings import PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG
from cloudinit import atomic_helper
from cloudinit.config import cc_set_hostname
-from cloudinit import dhclient_hook
from cloudinit.cmd.devel import read_cfg_paths
@@ -219,11 +218,11 @@ def attempt_cmdline_url(path, network=True, cmdline=None) -> Tuple[int, str]:
is_cloud_cfg = False
if is_cloud_cfg:
if cmdline_name == "url":
- LOG.warning(
- "DEPRECATED: `url` kernel command line key is"
- " deprecated for providing cloud-config via URL."
- " Please use `cloud-config-url` kernel command line"
- " parameter instead"
+ util.deprecate(
+ deprecated="The kernel command line key `url`",
+ deprecated_version="22.3",
+ extra_message=" Please use `cloud-config-url` "
+ "kernel command line parameter instead",
)
else:
if cmdline_name == "cloud-config-url":
@@ -822,7 +821,7 @@ def _maybe_set_hostname(init, stage, retry_stage):
)
if hostname: # meta-data or user-data hostname content
try:
- cc_set_hostname.handle("set-hostname", init.cfg, cloud, LOG, None)
+ cc_set_hostname.handle("set-hostname", init.cfg, cloud, None)
except cc_set_hostname.SetHostnameError as e:
LOG.debug(
"Failed setting hostname in %s stage. Will"
@@ -946,11 +945,6 @@ def main(sysv_args=None):
help="Query standardized instance metadata from the command line.",
)
- parser_dhclient = subparsers.add_parser(
- dhclient_hook.NAME, help=dhclient_hook.__doc__
- )
- dhclient_hook.get_parser(parser_dhclient)
-
parser_features = subparsers.add_parser(
"features", help="List defined features."
)
diff --git a/cloudinit/config/cc_ansible.py b/cloudinit/config/cc_ansible.py
index 876dbc6b..f8ca29bd 100644
--- a/cloudinit/config/cc_ansible.py
+++ b/cloudinit/config/cc_ansible.py
@@ -4,7 +4,7 @@ import os
import re
import sys
from copy import deepcopy
-from logging import Logger, getLogger
+from logging import getLogger
from textwrap import dedent
from typing import Optional
@@ -39,6 +39,7 @@ meta: MetaSchema = {
dedent(
"""\
ansible:
+ package_name: ansible-core
install_method: distro
pull:
url: "https://github.com/holmanb/vmboot.git"
@@ -155,9 +156,7 @@ class AnsiblePullDistro(AnsiblePull):
return bool(which("ansible"))
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
ansible_cfg: dict = cfg.get("ansible", {})
ansible_user = ansible_cfg.get("run_user")
diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py
index 07a7fa85..080b1590 100644
--- a/cloudinit/config/cc_apk_configure.py
+++ b/cloudinit/config/cc_apk_configure.py
@@ -6,7 +6,6 @@
"""Apk Configure: Configures apk repositories file."""
-from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
@@ -109,9 +108,7 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
"""
Call to handle apk_repos sections in cloud-config file.
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 98957f8d..d70b7cb4 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -12,7 +12,6 @@ import glob
import os
import pathlib
import re
-from logging import Logger
from textwrap import dedent
from cloudinit import gpg
@@ -170,17 +169,12 @@ def get_default_mirrors(arch=None, target=None):
raise ValueError("No default mirror known for arch %s" % arch)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
"""process the config for apt_config. This can be called from
curthooks if a global apt config was provided or via the "apt"
standalone command."""
# keeping code close to curtin codebase via entry handler
target = None
- if log is not None:
- global LOG
- LOG = log
# feed back converted config, but only work on the subset under 'apt'
cfg = convert_to_v3_apt_format(cfg)
apt_cfg = cfg.get("apt", {})
@@ -383,15 +377,6 @@ def rename_apt_lists(new_mirrors, target, arch):
LOG.warning("Failed to rename apt list:", exc_info=True)
-def mirror_to_placeholder(tmpl, mirror, placeholder):
- """mirror_to_placeholder
- replace the specified mirror in a template with a placeholder string
- Checks for existance of the expected mirror and warns if not found"""
- if mirror not in tmpl:
- LOG.warning("Expected mirror '%s' not found in: %s", mirror, tmpl)
- return tmpl.replace(mirror, placeholder)
-
-
def map_known_suites(suite):
"""there are a few default names which will be auto-extended.
This comes at the inability to use those names literally as suites,
@@ -614,9 +599,10 @@ def add_apt_sources(
def convert_v1_to_v2_apt_format(srclist):
"""convert v1 apt format to v2 (dict in apt_sources)"""
srcdict = {}
- LOG.warning(
- "DEPRECATION: 'apt_sources' deprecated config key found."
- " Use 'apt' instead"
+ util.deprecate(
+ deprecated="Config key 'apt_sources'",
+ deprecated_version="22.1",
+ extra_message="Use 'apt' instead",
)
if isinstance(srclist, list):
LOG.debug("apt config: convert V1 to V2 format (source list to dict)")
@@ -692,18 +678,17 @@ def convert_v2_to_v3_apt_format(oldcfg):
# no old config, so no new one to be created
if not needtoconvert:
return oldcfg
- LOG.warning(
- "DEPRECATION apt: converted deprecated config V2 to V3 format for"
- " keys '%s'. Use updated config keys.",
- ", ".join(needtoconvert),
+ util.deprecate(
+ deprecated=f"The following config key(s): {needtoconvert}",
+ deprecated_version="22.1",
)
# if old AND new config are provided, prefer the new one (LP #1616831)
newaptcfg = oldcfg.get("apt", None)
if newaptcfg is not None:
- LOG.warning(
- "DEPRECATION: apt config: deprecated V1/2 and V3 format specified,"
- " preferring V3"
+ util.deprecate(
+ deprecated="Support for combined old and new apt module keys",
+ deprecated_version="22.1",
)
for oldkey in needtoconvert:
newkey = mapoldkeys[oldkey]
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index 6a9ace9c..25cb63df 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -6,7 +6,7 @@
"""Apt Pipelining: configure apt pipelining."""
-from logging import Logger
+import logging
from textwrap import dedent
from cloudinit import util
@@ -15,6 +15,8 @@ from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
+LOG = logging.getLogger(__name__)
+
frequency = PER_INSTANCE
distros = ["ubuntu", "debian"]
DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining"
@@ -59,20 +61,18 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
apt_pipe_value = cfg.get("apt_pipelining", "os")
apt_pipe_value_s = str(apt_pipe_value).lower().strip()
if apt_pipe_value_s == "false":
- write_apt_snippet("0", log, DEFAULT_FILE)
+ write_apt_snippet("0", LOG, DEFAULT_FILE)
elif apt_pipe_value_s in ("none", "unchanged", "os"):
return
elif apt_pipe_value_s in [str(b) for b in range(0, 6)]:
- write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE)
+ write_apt_snippet(apt_pipe_value_s, LOG, DEFAULT_FILE)
else:
- log.warning("Invalid option for apt_pipelining: %s", apt_pipe_value)
+ LOG.warning("Invalid option for apt_pipelining: %s", apt_pipe_value)
def write_apt_snippet(setting, log, f_name):
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 48cd21cc..45012c3a 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -9,8 +9,8 @@
"""Bootcmd: run arbitrary commands early in the boot process."""
+import logging
import os
-from logging import Logger
from textwrap import dedent
from cloudinit import subp, temp_utils, util
@@ -19,6 +19,8 @@ from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_ALWAYS
+LOG = logging.getLogger(__name__)
+
frequency = PER_ALWAYS
distros = ["all"]
@@ -63,12 +65,10 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if "bootcmd" not in cfg:
- log.debug(
+ LOG.debug(
"Skipping module named %s, no 'bootcmd' key in configuration", name
)
return
@@ -79,7 +79,7 @@ def handle(
tmpf.write(util.encode_text(content))
tmpf.flush()
except Exception as e:
- util.logexc(log, "Failed to shellify bootcmd: %s", str(e))
+ util.logexc(LOG, "Failed to shellify bootcmd: %s", str(e))
raise
try:
@@ -90,7 +90,7 @@ def handle(
cmd = ["/bin/sh", tmpf.name]
subp.subp(cmd, env=env, capture=False)
except Exception:
- util.logexc(log, "Failed to run bootcmd module %s", name)
+ util.logexc(LOG, "Failed to run bootcmd module %s", name)
raise
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index 681936b4..ef3d1b58 100644
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -8,7 +8,7 @@
"""Byobu: Enable/disable byobu system wide and for default user."""
-from logging import Logger
+import logging
from cloudinit import subp, util
from cloudinit.cloud import Cloud
@@ -36,6 +36,8 @@ Valid configuration options for this module are:
"""
distros = ["ubuntu", "debian"]
+LOG = logging.getLogger(__name__)
+
meta: MetaSchema = {
"id": "cc_byobu",
"name": "Byobu",
@@ -53,16 +55,14 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if len(args) != 0:
value = args[0]
else:
value = util.get_cfg_option_str(cfg, "byobu_by_default", "")
if not value:
- log.debug("Skipping module named %s, no 'byobu' values found", name)
+ LOG.debug("Skipping module named %s, no 'byobu' values found", name)
return
if value == "user" or value == "system":
@@ -77,7 +77,7 @@ def handle(
"disable",
)
if value not in valid:
- log.warning("Unknown value %s for byobu_by_default", value)
+ LOG.warning("Unknown value %s for byobu_by_default", value)
mod_user = value.endswith("-user")
mod_sys = value.endswith("-system")
@@ -97,7 +97,7 @@ def handle(
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
(user, _user_config) = ug_util.extract_default(users)
if not user:
- log.warning(
+ LOG.warning(
"No default byobu user provided, "
"can not launch %s for the default user",
bl_inst,
@@ -112,7 +112,7 @@ def handle(
if len(shcmd):
cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
- log.debug("Setting byobu to %s", value)
+ LOG.debug("Setting byobu to %s", value)
subp.subp(cmd, capture=False)
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index 169b0e18..c1cd42a4 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -5,7 +5,6 @@
"""CA Certs: Add ca certificates."""
import os
-from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
@@ -25,6 +24,13 @@ DEFAULT_CONFIG = {
"ca_cert_update_cmd": ["update-ca-certificates"],
}
DISTRO_OVERRIDES = {
+ "fedora": {
+ "ca_cert_path": "/etc/pki/ca-trust/",
+ "ca_cert_local_path": "/usr/share/pki/ca-trust-source/",
+ "ca_cert_filename": "anchors/cloud-init-ca-cert-{cert_index}.crt",
+ "ca_cert_config": None,
+ "ca_cert_update_cmd": ["update-ca-trust"],
+ },
"rhel": {
"ca_cert_path": "/etc/pki/ca-trust/",
"ca_cert_local_path": "/usr/share/pki/ca-trust-source/",
@@ -32,8 +38,25 @@ DISTRO_OVERRIDES = {
"ca_cert_config": None,
"ca_cert_update_cmd": ["update-ca-trust"],
},
+ "opensuse": {
+ "ca_cert_path": "/etc/pki/trust/",
+ "ca_cert_local_path": "/usr/share/pki/trust/",
+ "ca_cert_filename": "anchors/cloud-init-ca-cert-{cert_index}.crt",
+ "ca_cert_config": None,
+ "ca_cert_update_cmd": ["update-ca-certificates"],
+ },
}
+for distro in (
+ "opensuse-microos",
+ "opensuse-tumbleweed",
+ "opensuse-leap",
+ "sle_hpc",
+ "sle-micro",
+ "sles",
+):
+ DISTRO_OVERRIDES[distro] = DISTRO_OVERRIDES["opensuse"]
+
MODULE_DESCRIPTION = """\
This module adds CA certificates to the system's CA store and updates any
related files using the appropriate OS-specific utility. The default CA
@@ -48,7 +71,20 @@ configuration option ``remove_defaults``.
Alpine Linux requires the ca-certificates package to be installed in
order to provide the ``update-ca-certificates`` command.
"""
-distros = ["alpine", "debian", "rhel", "ubuntu"]
+distros = [
+ "alpine",
+ "debian",
+ "fedora",
+ "rhel",
+ "opensuse",
+ "opensuse-microos",
+ "opensuse-tumbleweed",
+ "opensuse-leap",
+ "sle_hpc",
+ "sle-micro",
+ "sles",
+ "ubuntu",
+]
meta: MetaSchema = {
"id": "cc_ca_certs",
@@ -148,14 +184,20 @@ def disable_system_ca_certs(distro_cfg):
@param distro_cfg: A hash providing _distro_ca_certs_configs function.
"""
- if distro_cfg["ca_cert_config"] is None:
+
+ ca_cert_cfg_fn = distro_cfg["ca_cert_config"]
+
+ if not ca_cert_cfg_fn or not os.path.exists(ca_cert_cfg_fn):
return
+
header_comment = (
"# Modified by cloud-init to deselect certs due to user-data"
)
+
added_header = False
- if os.stat(distro_cfg["ca_cert_config"]).st_size != 0:
- orig = util.load_file(distro_cfg["ca_cert_config"])
+
+ if os.stat(ca_cert_cfg_fn).st_size:
+ orig = util.load_file(ca_cert_cfg_fn)
out_lines = []
for line in orig.splitlines():
if line == header_comment:
@@ -168,9 +210,10 @@ def disable_system_ca_certs(distro_cfg):
out_lines.append(header_comment)
added_header = True
out_lines.append("!" + line)
- util.write_file(
- distro_cfg["ca_cert_config"], "\n".join(out_lines) + "\n", omode="wb"
- )
+
+ util.write_file(
+ ca_cert_cfg_fn, "\n".join(out_lines) + "\n", omode="wb"
+ )
def remove_default_ca_certs(distro_cfg):
@@ -187,9 +230,7 @@ def remove_default_ca_certs(distro_cfg):
util.delete_dir_contents(distro_cfg["ca_cert_local_path"])
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
"""
Call to handle ca_cert sections in cloud-config file.
@@ -200,9 +241,10 @@ def handle(
@param args: Any module arguments from cloud.cfg
"""
if "ca-certs" in cfg:
- LOG.warning(
- "DEPRECATION: key 'ca-certs' is now deprecated. Use 'ca_certs'"
- " instead."
+ util.deprecate(
+ deprecated="Key 'ca-certs'",
+ deprecated_version="22.1",
+ extra_message="Use 'ca_certs' instead.",
)
elif "ca_certs" not in cfg:
LOG.debug(
@@ -222,9 +264,10 @@ def handle(
# If there is a remove_defaults option set to true, disable the system
# default trusted CA certs first.
if "remove-defaults" in ca_cert_cfg:
- LOG.warning(
- "DEPRECATION: key 'ca-certs.remove-defaults' is now deprecated."
- " Use 'ca_certs.remove_defaults' instead."
+ util.deprecate(
+ deprecated="Key 'remove-defaults'",
+ deprecated_version="22.1",
+ extra_message="Use 'remove_defaults' instead.",
)
if ca_cert_cfg.get(
"remove_defaults", ca_cert_cfg.get("remove-defaults", False)
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 5a809230..940aa5f7 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -10,8 +10,8 @@
import itertools
import json
+import logging
import os
-from logging import Logger
from textwrap import dedent
from cloudinit import subp, temp_utils, templater, url_helper, util
@@ -97,6 +97,8 @@ CHEF_EXEC_DEF_ARGS = tuple(["-d", "-i", "1800", "-s", "20"])
frequency = PER_ALWAYS
distros = ["all"]
+LOG = logging.getLogger(__name__)
+
meta: MetaSchema = {
"id": "cc_chef",
"name": "Chef",
@@ -146,7 +148,7 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def post_run_chef(chef_cfg, log):
+def post_run_chef(chef_cfg):
delete_pem = util.get_cfg_option_bool(
chef_cfg, "delete_validation_post_exec", default=False
)
@@ -154,14 +156,14 @@ def post_run_chef(chef_cfg, log):
os.unlink(CHEF_VALIDATION_PEM_PATH)
-def get_template_params(iid, chef_cfg, log):
+def get_template_params(iid, chef_cfg):
params = CHEF_RB_TPL_DEFAULTS.copy()
# Allow users to overwrite any of the keys they want (if they so choose),
# when a value is None, then the value will be set to None and no boolean
# or string version will be populated...
for (k, v) in chef_cfg.items():
if k not in CHEF_RB_TPL_KEYS:
- log.debug("Skipping unknown chef template key '%s'", k)
+ LOG.debug("Skipping unknown chef template key '%s'", k)
continue
if v is None:
params[k] = None
@@ -189,14 +191,12 @@ def get_template_params(iid, chef_cfg, log):
return params
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
"""Handler method activated by cloud-init."""
# If there isn't a chef key in the configuration don't do anything
if "chef" not in cfg:
- log.debug(
+ LOG.debug(
"Skipping module named %s, no 'chef' key in configuration", name
)
return
@@ -218,7 +218,7 @@ def handle(
if vcert != "system":
util.write_file(vkey_path, vcert)
elif not os.path.isfile(vkey_path):
- log.warning(
+ LOG.warning(
"chef validation_cert provided as 'system', but "
"validation_key path '%s' does not exist.",
vkey_path,
@@ -228,7 +228,7 @@ def handle(
template_fn = cloud.get_template_filename("chef_client.rb")
if template_fn:
iid = str(cloud.datasource.get_instance_id())
- params = get_template_params(iid, chef_cfg, log)
+ params = get_template_params(iid, chef_cfg)
# Do a best effort attempt to ensure that the template values that
# are associated with paths have their parent directory created
# before they are used by the chef-client itself.
@@ -239,14 +239,14 @@ def handle(
util.ensure_dirs(param_paths)
templater.render_to_file(template_fn, CHEF_RB_PATH, params)
else:
- log.warning("No template found, not rendering to %s", CHEF_RB_PATH)
+ LOG.warning("No template found, not rendering to %s", CHEF_RB_PATH)
# Set the firstboot json
fb_filename = util.get_cfg_option_str(
chef_cfg, "firstboot_path", default=CHEF_FB_PATH
)
if not fb_filename:
- log.info("First boot path empty, not writing first boot json file")
+ LOG.info("First boot path empty, not writing first boot json file")
else:
initial_json = {}
if "run_list" in chef_cfg:
@@ -263,18 +263,18 @@ def handle(
)
installed = subp.is_exe(CHEF_EXEC_PATH)
if not installed or force_install:
- run = install_chef(cloud, chef_cfg, log)
+ run = install_chef(cloud, chef_cfg)
elif installed:
run = util.get_cfg_option_bool(chef_cfg, "exec", default=False)
else:
run = False
if run:
- run_chef(chef_cfg, log)
- post_run_chef(chef_cfg, log)
+ run_chef(chef_cfg)
+ post_run_chef(chef_cfg)
-def run_chef(chef_cfg, log):
- log.debug("Running chef-client")
+def run_chef(chef_cfg):
+ LOG.debug("Running chef-client")
cmd = [CHEF_EXEC_PATH]
if "exec_arguments" in chef_cfg:
cmd_args = chef_cfg["exec_arguments"]
@@ -283,7 +283,7 @@ def run_chef(chef_cfg, log):
elif isinstance(cmd_args, str):
cmd.append(cmd_args)
else:
- log.warning(
+ LOG.warning(
"Unknown type %s provided for chef"
" 'exec_arguments' expected list, tuple,"
" or string",
@@ -345,7 +345,7 @@ def install_chef_from_omnibus(
)
-def install_chef(cloud: Cloud, chef_cfg, log):
+def install_chef(cloud: Cloud, chef_cfg):
# If chef is not installed, we install chef based on 'install_type'
install_type = util.get_cfg_option_str(
chef_cfg, "install_type", "packages"
@@ -373,7 +373,7 @@ def install_chef(cloud: Cloud, chef_cfg, log):
omnibus_version=omnibus_version,
)
else:
- log.warning("Unknown chef install type '%s'", install_type)
+ LOG.warning("Unknown chef install type '%s'", install_type)
run = False
return run
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index 7439b89b..2773e35c 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -8,7 +8,7 @@
"""Disable EC2 Metadata: Disable AWS EC2 metadata."""
-from logging import Logger
+import logging
from textwrap import dedent
from cloudinit import subp, util
@@ -21,6 +21,8 @@ from cloudinit.settings import PER_ALWAYS
REJECT_CMD_IF = ["route", "add", "-host", "169.254.169.254", "reject"]
REJECT_CMD_IP = ["ip", "route", "add", "prohibit", "169.254.169.254"]
+LOG = logging.getLogger(__name__)
+
meta: MetaSchema = {
"id": "cc_disable_ec2_metadata",
"name": "Disable EC2 Metadata",
@@ -40,9 +42,7 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
if disabled:
reject_cmd = None
@@ -51,14 +51,14 @@ def handle(
elif subp.which("ifconfig"):
reject_cmd = REJECT_CMD_IF
else:
- log.error(
+ LOG.error(
'Neither "route" nor "ip" command found, unable to '
"manipulate routing table"
)
return
subp.subp(reject_cmd, capture=False)
else:
- log.debug(
+ LOG.debug(
"Skipping module named %s, disabling the ec2 route not enabled",
name,
)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 4aae5530..3250efd8 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -10,7 +10,6 @@
import logging
import os
import shlex
-from logging import Logger
from textwrap import dedent
from cloudinit import subp, util
@@ -112,9 +111,7 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
"""
See doc/examples/cloud-config-disk-setup.txt for documentation on the
format.
@@ -128,14 +125,14 @@ def handle(
disk_setup = cfg.get("disk_setup")
if isinstance(disk_setup, dict):
update_disk_setup_devices(disk_setup, alias_to_device)
- log.debug("Partitioning disks: %s", str(disk_setup))
+ LOG.debug("Partitioning disks: %s", str(disk_setup))
for disk, definition in disk_setup.items():
if not isinstance(definition, dict):
- log.warning("Invalid disk definition for %s" % disk)
+ LOG.warning("Invalid disk definition for %s", disk)
continue
try:
- log.debug("Creating new partition table/disk")
+ LOG.debug("Creating new partition table/disk")
util.log_time(
logfunc=LOG.debug,
msg="Creating partition on %s" % disk,
@@ -147,15 +144,15 @@ def handle(
fs_setup = cfg.get("fs_setup")
if isinstance(fs_setup, list):
- log.debug("setting up filesystems: %s", str(fs_setup))
+ LOG.debug("setting up filesystems: %s", str(fs_setup))
update_fs_setup_devices(fs_setup, alias_to_device)
for definition in fs_setup:
if not isinstance(definition, dict):
- log.warning("Invalid file system definition: %s" % definition)
+ LOG.warning("Invalid file system definition: %s", definition)
continue
try:
- log.debug("Creating new filesystem.")
+ LOG.debug("Creating new filesystem.")
device = definition.get("device")
util.log_time(
logfunc=LOG.debug,
@@ -274,7 +271,7 @@ def enumerate_disk(device, nodeps=False):
try:
info, _err = subp.subp(lsblk_cmd)
except Exception as e:
- raise Exception(
+ raise RuntimeError(
"Failed during disk check for %s\n%s" % (device, e)
) from e
@@ -338,7 +335,7 @@ def check_fs(device):
try:
out, _err = subp.subp(blkid_cmd, rcs=[0, 2])
except Exception as e:
- raise Exception(
+ raise RuntimeError(
"Failed during disk check for %s\n%s" % (device, e)
) from e
@@ -444,7 +441,7 @@ def get_hdd_size(device):
size_in_bytes, _ = subp.subp([BLKDEV_CMD, "--getsize64", device])
sector_size, _ = subp.subp([BLKDEV_CMD, "--getss", device])
except Exception as e:
- raise Exception("Failed to get %s size\n%s" % (device, e)) from e
+ raise RuntimeError("Failed to get %s size\n%s" % (device, e)) from e
return int(size_in_bytes) / int(sector_size)
@@ -462,7 +459,7 @@ def check_partition_mbr_layout(device, layout):
try:
out, _err = subp.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
- raise Exception(
+ raise RuntimeError(
"Error running partition command on %s\n%s" % (device, e)
) from e
@@ -493,7 +490,7 @@ def check_partition_gpt_layout(device, layout):
try:
out, _err = subp.subp(prt_cmd, update_env=LANG_C_ENV)
except Exception as e:
- raise Exception(
+ raise RuntimeError(
"Error running partition command on %s\n%s" % (device, e)
) from e
@@ -542,7 +539,7 @@ def check_partition_layout(table_type, device, layout):
elif "mbr" == table_type:
found_layout = check_partition_mbr_layout(device, layout)
else:
- raise Exception("Unable to determine table type")
+ raise RuntimeError("Unable to determine table type")
LOG.debug(
"called check_partition_%s_layout(%s, %s), returned: %s",
@@ -595,11 +592,11 @@ def get_partition_mbr_layout(size, layout):
if (len(layout) == 0 and isinstance(layout, list)) or not isinstance(
layout, list
):
- raise Exception("Partition layout is invalid")
+ raise RuntimeError("Partition layout is invalid")
last_part_num = len(layout)
if last_part_num > 4:
- raise Exception("Only simply partitioning is allowed.")
+ raise RuntimeError("Only simply partitioning is allowed.")
part_definition = []
part_num = 0
@@ -610,7 +607,9 @@ def get_partition_mbr_layout(size, layout):
if isinstance(part, list):
if len(part) != 2:
- raise Exception("Partition was incorrectly defined: %s" % part)
+ raise RuntimeError(
+ "Partition was incorrectly defined: %s" % part
+ )
percent, part_type = part
part_size = int(float(size) * (float(percent) / 100))
@@ -622,7 +621,7 @@ def get_partition_mbr_layout(size, layout):
sfdisk_definition = "\n".join(part_definition)
if len(part_definition) > 4:
- raise Exception(
+ raise RuntimeError(
"Calculated partition definition is too big\n%s"
% sfdisk_definition
)
@@ -638,7 +637,7 @@ def get_partition_gpt_layout(size, layout):
for partition in layout:
if isinstance(partition, list):
if len(partition) != 2:
- raise Exception(
+ raise RuntimeError(
"Partition was incorrectly defined: %s" % partition
)
percent, partition_type = partition
@@ -682,7 +681,7 @@ def purge_disk(device):
LOG.info("Purging filesystem on /dev/%s", d["name"])
subp.subp(wipefs_cmd)
except Exception as e:
- raise Exception(
+ raise RuntimeError(
"Failed FS purge of /dev/%s" % d["name"]
) from e
@@ -702,7 +701,7 @@ def get_partition_layout(table_type, size, layout):
return get_partition_mbr_layout(size, layout)
elif "gpt" == table_type:
return get_partition_gpt_layout(size, layout)
- raise Exception("Unable to determine table type")
+ raise RuntimeError("Unable to determine table type")
def read_parttbl(device):
@@ -733,7 +732,7 @@ def exec_mkpart_mbr(device, layout):
try:
subp.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
- raise Exception(
+ raise RuntimeError(
"Failed to partition device %s\n%s" % (device, e)
) from e
@@ -816,7 +815,7 @@ def mkpart(device, definition):
# This prevents you from overwriting the device
LOG.debug("Checking if device %s is a valid device", device)
if not is_device_valid(device):
- raise Exception(
+ raise RuntimeError(
"Device {device} is not a disk device!".format(device=device)
)
@@ -849,7 +848,7 @@ def mkpart(device, definition):
elif "gpt" == table_type:
exec_mkpart_gpt(device, part_definition)
else:
- raise Exception("Unable to determine table type")
+ raise RuntimeError("Unable to determine table type")
LOG.debug("Partition table created for %s", device)
@@ -997,7 +996,7 @@ def mkfs(fs_cfg):
# Check that we can create the FS
if not (fs_type or fs_cmd):
- raise Exception(
+ raise RuntimeError(
"No way to create filesystem '{label}'. fs_type or fs_cmd "
"must be set.".format(label=label)
)
@@ -1059,7 +1058,7 @@ def mkfs(fs_cfg):
try:
subp.subp(fs_cmd, shell=shell)
except Exception as e:
- raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) from e
+ raise RuntimeError("Failed to exec of '%s':\n%s" % (fs_cmd, e)) from e
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
index ae211f31..e6ce1f19 100644
--- a/cloudinit/config/cc_fan.py
+++ b/cloudinit/config/cc_fan.py
@@ -5,7 +5,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Fan: Configure ubuntu fan networking"""
-from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
@@ -87,9 +86,7 @@ def stop_update_start(distro, service, config_file, content):
distro.manage_service("enable", service)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
cfgin = cfg.get("fan")
if not cfgin:
cfgin = {}
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index d773afb1..efc234b9 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -7,7 +7,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Final Message: Output final message when cloud-init has finished"""
-from logging import Logger
+import logging
from textwrap import dedent
from cloudinit import templater, util, version
@@ -56,6 +56,7 @@ meta: MetaSchema = {
"activate_by_schema_keys": [],
}
+LOG = logging.getLogger(__name__)
__doc__ = get_meta_doc(meta)
# Jinja formated default message
@@ -66,9 +67,7 @@ FINAL_MESSAGE_DEF = (
)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
msg_in = ""
if len(args) != 0:
@@ -95,20 +94,20 @@ def handle(
"%s\n" % (templater.render_string(msg_in, subs)),
console=False,
stderr=True,
- log=log,
+ log=LOG,
)
except Exception:
- util.logexc(log, "Failed to render final message template")
+ util.logexc(LOG, "Failed to render final message template")
boot_fin_fn = cloud.paths.boot_finished
try:
contents = "%s - %s - v. %s\n" % (uptime, ts, cver)
util.write_file(boot_fin_fn, contents, ensure_dir_exists=False)
except Exception:
- util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn)
+ util.logexc(LOG, "Failed to write boot finished file %s", boot_fin_fn)
if cloud.datasource.is_disconnected:
- log.warning("Used fallback datasource")
+ LOG.warning("Used fallback datasource")
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index f3acbe2a..cce2f686 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -16,7 +16,6 @@ import re
import stat
from abc import ABC, abstractmethod
from contextlib import suppress
-from logging import Logger
from pathlib import Path
from textwrap import dedent
from typing import Tuple
@@ -566,60 +565,59 @@ def resize_devices(resizer, devices):
return info
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if "growpart" not in cfg:
- log.debug(
- "No 'growpart' entry in cfg. Using default: %s" % DEFAULT_CONFIG
+ LOG.debug(
+ "No 'growpart' entry in cfg. Using default: %s", DEFAULT_CONFIG
)
cfg["growpart"] = DEFAULT_CONFIG
mycfg = cfg.get("growpart")
if not isinstance(mycfg, dict):
- log.warning("'growpart' in config was not a dict")
+ LOG.warning("'growpart' in config was not a dict")
return
mode = mycfg.get("mode", "auto")
if util.is_false(mode):
if mode != "off":
- log.warning(
- f"DEPRECATED: growpart mode '{mode}' is deprecated. "
- "Use 'off' instead."
+ util.deprecate(
+ deprecated="Growpart's 'mode' key with value '{mode}'",
+ deprecated_version="22.2",
+ extra_message="Use 'off' instead.",
)
- log.debug("growpart disabled: mode=%s" % mode)
+ LOG.debug("growpart disabled: mode=%s", mode)
return
if util.is_false(mycfg.get("ignore_growroot_disabled", False)):
if os.path.isfile("/etc/growroot-disabled"):
- log.debug("growpart disabled: /etc/growroot-disabled exists")
- log.debug("use ignore_growroot_disabled to ignore")
+ LOG.debug("growpart disabled: /etc/growroot-disabled exists")
+ LOG.debug("use ignore_growroot_disabled to ignore")
return
devices = util.get_cfg_option_list(mycfg, "devices", ["/"])
if not len(devices):
- log.debug("growpart: empty device list")
+ LOG.debug("growpart: empty device list")
return
try:
resizer = resizer_factory(mode, cloud.distro)
except (ValueError, TypeError) as e:
- log.debug("growpart unable to find resizer for '%s': %s" % (mode, e))
+ LOG.debug("growpart unable to find resizer for '%s': %s", mode, e)
if mode != "auto":
raise e
return
resized = util.log_time(
- logfunc=log.debug,
+ logfunc=LOG.debug,
msg="resize_devices",
func=resize_devices,
args=(resizer, devices),
)
for (entry, action, msg) in resized:
if action == RESIZE.CHANGED:
- log.info("'%s' resized: %s" % (entry, msg))
+ LOG.info("'%s' resized: %s", entry, msg)
else:
- log.debug("'%s' %s: %s" % (entry, action, msg))
+ LOG.debug("'%s' %s: %s", entry, action, msg)
RESIZERS = (("growpart", ResizeGrowPart), ("gpart", ResizeGpart))
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index 893204fa..91736146 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -8,8 +8,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Grub Dpkg: Configure grub debconf installation device"""
+import logging
import os
-from logging import Logger
from textwrap import dedent
from cloudinit import subp, util
@@ -22,9 +22,8 @@ from cloudinit.subp import ProcessExecutionError
MODULE_DESCRIPTION = """\
Configure which device is used as the target for grub installation. This module
can be enabled/disabled using the ``enabled`` config key in the ``grub_dpkg``
-config dict. The global config key ``grub-dpkg`` is an alias for ``grub_dpkg``.
-If no installation device is specified this module will execute grub-probe to
-determine which disk the /boot directory is associated with.
+config dict. This module automatically selects a disk using ``grub-probe`` if
+no installation device is specified.
The value which is placed into the debconf database is in the format which the
grub postinstall script expects. Normally, this is a /dev/disk/by-id/ value,
@@ -46,8 +45,11 @@ meta: MetaSchema = {
"""\
grub_dpkg:
enabled: true
+ # BIOS mode (install_devices needs disk)
grub-pc/install_devices: /dev/sda
grub-pc/install_devices_empty: false
+ # EFI mode (install_devices needs partition)
+ grub-efi/install_devices: /dev/sda
"""
)
],
@@ -55,9 +57,10 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
-def fetch_idevs(log):
+def fetch_idevs():
"""
Fetches the /dev/disk/by-id device grub is installed to.
Falls back to plain disk name if no by-id entry is present.
@@ -65,27 +68,35 @@ def fetch_idevs(log):
disk = ""
devices = []
+ # BIOS mode systems use /boot and the disk path,
+ # EFI mode systems use /boot/efi and the partition path.
+ probe_target = "disk"
+ probe_mount = "/boot"
+ if is_efi_booted():
+ probe_target = "device"
+ probe_mount = "/boot/efi"
+
try:
# get the root disk where the /boot directory resides.
- disk = subp.subp(["grub-probe", "-t", "disk", "/boot"], capture=True)[
- 0
- ].strip()
+ disk = subp.subp(
+ ["grub-probe", "-t", probe_target, probe_mount], capture=True
+ ).stdout.strip()
except ProcessExecutionError as e:
# grub-common may not be installed, especially on containers
# FileNotFoundError is a nested exception of ProcessExecutionError
if isinstance(e.reason, FileNotFoundError):
- log.debug("'grub-probe' not found in $PATH")
+ LOG.debug("'grub-probe' not found in $PATH")
# disks from the container host are present in /proc and /sys
# which is where grub-probe determines where /boot is.
# it then checks for existence in /dev, which fails as host disks
# are not exposed to the container.
elif "failed to get canonical path" in e.stderr:
- log.debug("grub-probe 'failed to get canonical path'")
+ LOG.debug("grub-probe 'failed to get canonical path'")
else:
# something bad has happened, continue to log the error
raise
except Exception:
- util.logexc(log, "grub-probe failed to execute for grub-dpkg")
+ util.logexc(LOG, "grub-probe failed to execute for grub-dpkg")
if not disk or not os.path.exists(disk):
# If we failed to detect a disk, we can return early
@@ -97,68 +108,89 @@ def fetch_idevs(log):
subp.subp(
["udevadm", "info", "--root", "--query=symlink", disk],
capture=True,
- )[0]
- .strip()
+ )
+ .stdout.strip()
.split()
)
except Exception:
util.logexc(
- log, "udevadm DEVLINKS symlink query failed for disk='%s'", disk
+ LOG, "udevadm DEVLINKS symlink query failed for disk='%s'", disk
)
- log.debug("considering these device symlinks: %s", ",".join(devices))
+ LOG.debug("considering these device symlinks: %s", ",".join(devices))
# filter symlinks for /dev/disk/by-id entries
devices = [dev for dev in devices if "disk/by-id" in dev]
- log.debug("filtered to these disk/by-id symlinks: %s", ",".join(devices))
+ LOG.debug("filtered to these disk/by-id symlinks: %s", ",".join(devices))
# select first device if there is one, else fall back to plain name
idevs = sorted(devices)[0] if devices else disk
- log.debug("selected %s", idevs)
+ LOG.debug("selected %s", idevs)
return idevs
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def is_efi_booted() -> bool:
+ """
+ Check if the system is booted in EFI mode.
+ """
+ try:
+ return os.path.exists("/sys/firmware/efi")
+ except OSError as e:
+ LOG.error("Failed to determine if system is booted in EFI mode: %s", e)
+ # If we can't determine if we're booted in EFI mode, assume we're not.
+ return False
+
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
mycfg = cfg.get("grub_dpkg", cfg.get("grub-dpkg", {}))
if not mycfg:
mycfg = {}
enabled = mycfg.get("enabled", True)
if util.is_false(enabled):
- log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled)
+ LOG.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled)
return
- idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
- if idevs is None:
- idevs = fetch_idevs(log)
-
- idevs_empty = mycfg.get("grub-pc/install_devices_empty")
- if idevs_empty is None:
- idevs_empty = not idevs
- elif not isinstance(idevs_empty, bool):
- idevs_empty = util.translate_bool(idevs_empty)
- idevs_empty = str(idevs_empty).lower()
-
- # now idevs and idevs_empty are set to determined values
- # or, those set by user
-
- dconf_sel = (
- "grub-pc grub-pc/install_devices string %s\n"
- "grub-pc grub-pc/install_devices_empty boolean %s\n"
- % (idevs, idevs_empty)
- )
-
- log.debug(
- "Setting grub debconf-set-selections with '%s','%s'"
- % (idevs, idevs_empty)
- )
+ dconf_sel = get_debconf_config(mycfg)
+ LOG.debug("Setting grub debconf-set-selections with '%s'", dconf_sel)
try:
subp.subp(["debconf-set-selections"], dconf_sel)
- except Exception:
- util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
+ except Exception as e:
+ util.logexc(
+ LOG, "Failed to run debconf-set-selections for grub_dpkg: %s", e
+ )
+
+def get_debconf_config(mycfg: Config) -> str:
+ """
+ Returns the debconf config for grub-pc or
+ grub-efi depending on the systems boot mode.
+ """
+ if is_efi_booted():
+ idevs = util.get_cfg_option_str(
+ mycfg, "grub-efi/install_devices", None
+ )
-# vi: ts=4 expandtab
+ if idevs is None:
+ idevs = fetch_idevs()
+
+ return "grub-pc grub-efi/install_devices string %s\n" % idevs
+ else:
+ idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
+ if idevs is None:
+ idevs = fetch_idevs()
+
+ idevs_empty = mycfg.get("grub-pc/install_devices_empty")
+ if idevs_empty is None:
+ idevs_empty = not idevs
+ elif not isinstance(idevs_empty, bool):
+ idevs_empty = util.translate_bool(idevs_empty)
+ idevs_empty = str(idevs_empty).lower()
+
+ # now idevs and idevs_empty are set to determined values
+ # or, those set by user
+ return (
+ "grub-pc grub-pc/install_devices string %s\n"
+ "grub-pc grub-pc/install_devices_empty boolean %s\n"
+ % (idevs, idevs_empty)
+ )
diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py
index b95b8a4c..621e3be5 100644
--- a/cloudinit/config/cc_install_hotplug.py
+++ b/cloudinit/config/cc_install_hotplug.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Install hotplug udev rules if supported and enabled"""
+import logging
import os
-from logging import Logger
from textwrap import dedent
from cloudinit import stages, subp, util
@@ -56,6 +56,7 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
HOTPLUG_UDEV_PATH = "/etc/udev/rules.d/10-cloud-init-hook-hotplug.rules"
@@ -68,9 +69,7 @@ LABEL="cloudinit_end"
"""
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
network_hotplug_enabled = (
"updates" in cfg
and "network" in cfg["updates"]
@@ -90,19 +89,19 @@ def handle(
)
if not (hotplug_supported and hotplug_enabled):
if os.path.exists(HOTPLUG_UDEV_PATH):
- log.debug("Uninstalling hotplug, not enabled")
+ LOG.debug("Uninstalling hotplug, not enabled")
util.del_file(HOTPLUG_UDEV_PATH)
subp.subp(["udevadm", "control", "--reload-rules"])
elif network_hotplug_enabled:
- log.warning(
+ LOG.warning(
"Hotplug is unsupported by current datasource. "
"Udev rules will NOT be installed."
)
else:
- log.debug("Skipping hotplug install, not enabled")
+ LOG.debug("Skipping hotplug install, not enabled")
return
if not subp.which("udevadm"):
- log.debug("Skipping hotplug install, udevadm not found")
+ LOG.debug("Skipping hotplug install, udevadm not found")
return
# This may need to turn into a distro property at some point
diff --git a/cloudinit/config/cc_keyboard.py b/cloudinit/config/cc_keyboard.py
index f6075e63..5e61a0da 100644
--- a/cloudinit/config/cc_keyboard.py
+++ b/cloudinit/config/cc_keyboard.py
@@ -6,7 +6,6 @@
"""keyboard: set keyboard layout"""
-from logging import Logger
from textwrap import dedent
from cloudinit import distros
@@ -60,9 +59,7 @@ __doc__ = get_meta_doc(meta)
LOG = logging.getLogger(__name__)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if "keyboard" not in cfg:
LOG.debug(
"Skipping module named %s, no 'keyboard' section found", name
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index 649c0abb..657910af 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -8,8 +8,8 @@
"""Keys to Console: Control which SSH host keys may be written to console"""
+import logging
import os
-from logging import Logger
from textwrap import dedent
from cloudinit import subp, util
@@ -68,6 +68,8 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
+
def _get_helper_tool_path(distro):
try:
@@ -77,18 +79,16 @@ def _get_helper_tool_path(distro):
return HELPER_TOOL_TPL % base_lib
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if util.is_false(cfg.get("ssh", {}).get("emit_keys_to_console", True)):
- log.debug(
+ LOG.debug(
"Skipping module named %s, logging of SSH host keys disabled", name
)
return
helper_path = _get_helper_tool_path(cloud.distro)
if not os.path.exists(helper_path):
- log.warning(
+ LOG.warning(
"Unable to activate module %s, helper tool not found at %s",
name,
helper_path,
@@ -107,7 +107,7 @@ def handle(
(stdout, _stderr) = subp.subp(cmd)
util.multi_log("%s\n" % (stdout.strip()), stderr=False, console=True)
except Exception:
- log.warning("Writing keys to the system console failed!")
+ LOG.warning("Writing keys to the system console failed!")
raise
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 8abb4c5f..a34ea019 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -8,9 +8,9 @@
"""install and configure landscape client"""
+import logging
import os
from io import BytesIO
-from logging import Logger
from textwrap import dedent
from configobj import ConfigObj
@@ -98,11 +98,10 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
"""
Basically turn a top level 'landscape' entry with a 'client' dict
and render it to ConfigObj format under '[client]' section in
@@ -135,7 +134,7 @@ def handle(
util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE))
util.write_file(LSC_CLIENT_CFG_FILE, contents.getvalue())
- log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE)
+ LOG.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE)
util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
subp.subp(["service", "landscape-client", "restart"])
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
index 4a53e765..b69475a7 100644
--- a/cloudinit/config/cc_locale.py
+++ b/cloudinit/config/cc_locale.py
@@ -8,7 +8,7 @@
"""Locale: set system locale"""
-from logging import Logger
+import logging
from textwrap import dedent
from cloudinit import util
@@ -49,23 +49,22 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if len(args) != 0:
locale = args[0]
else:
locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale())
if util.is_false(locale):
- log.debug(
+ LOG.debug(
"Skipping module named %s, disabled by config: %s", name, locale
)
return
- log.debug("Setting locale to %s", locale)
+ LOG.debug("Setting locale to %s", locale)
locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
cloud.distro.apply_locale(locale, locale_cfgfile)
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index e692fbd5..695d233e 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -7,7 +7,6 @@
"""LXD: configure lxd with ``lxd init`` and optionally lxd-bridge"""
import os
-from logging import Logger
from textwrap import dedent
from typing import List, Tuple
@@ -197,13 +196,11 @@ def supplemental_schema_validation(
raise ValueError(". ".join(errors))
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
# Get config
lxd_cfg = cfg.get("lxd")
if not lxd_cfg:
- log.debug(
+ LOG.debug(
"Skipping module named %s, not present or disabled by cfg", name
)
return
@@ -224,7 +221,7 @@ def handle(
try:
cloud.distro.install_packages(packages)
except subp.ProcessExecutionError as exc:
- log.warning("failed to install packages %s: %s", packages, exc)
+ LOG.warning("failed to install packages %s: %s", packages, exc)
return
subp.subp(["lxd", "waitready", "--timeout=300"])
@@ -251,7 +248,7 @@ def handle(
if init_cfg["storage_backend"] == "lvm" and not os.path.exists(
f"/lib/modules/{kernel}/kernel/drivers/md/dm-thin-pool.ko"
):
- log.warning(
+ LOG.warning(
"cloud-init doesn't use thinpool by default on Ubuntu due to "
"LP #1982780. This behavior will change in the future.",
)
@@ -294,7 +291,7 @@ def handle(
# Update debconf database
try:
- log.debug("Setting lxd debconf via " + dconf_comm)
+ LOG.debug("Setting lxd debconf via %s", dconf_comm)
data = (
"\n".join(
["set %s %s" % (k, v) for k, v in debconf.items()]
@@ -304,14 +301,14 @@ def handle(
subp.subp(["debconf-communicate"], data)
except Exception:
util.logexc(
- log, "Failed to run '%s' for lxd with" % dconf_comm
+ LOG, "Failed to run '%s' for lxd with" % dconf_comm
)
# Remove the existing configuration file (forces re-generation)
util.del_file("/etc/default/lxd-bridge")
# Run reconfigure
- log.debug("Running dpkg-reconfigure for lxd")
+ LOG.debug("Running dpkg-reconfigure for lxd")
subp.subp(["dpkg-reconfigure", "lxd", "--frontend=noninteractive"])
else:
# Built-in LXD bridge support
@@ -323,12 +320,12 @@ def handle(
attach=bool(cmd_attach),
)
if cmd_create:
- log.debug("Creating lxd bridge: %s" % " ".join(cmd_create))
+ LOG.debug("Creating lxd bridge: %s", " ".join(cmd_create))
_lxc(cmd_create)
if cmd_attach:
- log.debug(
- "Setting up default lxd bridge: %s" % " ".join(cmd_attach)
+ LOG.debug(
+ "Setting up default lxd bridge: %s", " ".join(cmd_attach)
)
_lxc(cmd_attach)
@@ -382,7 +379,7 @@ def bridge_to_debconf(bridge_cfg):
debconf["lxd/bridge-domain"] = bridge_cfg.get("domain")
else:
- raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode"))
+ raise RuntimeError('invalid bridge mode "%s"' % bridge_cfg.get("mode"))
return debconf
@@ -399,7 +396,7 @@ def bridge_to_cmd(bridge_cfg):
return None, cmd_attach
if bridge_cfg.get("mode") != "new":
- raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode"))
+ raise RuntimeError('invalid bridge mode "%s"' % bridge_cfg.get("mode"))
cmd_create = ["network", "create", bridge_name]
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index 7d75078d..967ca8f3 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -11,7 +11,6 @@
import errno
import io
-from logging import Logger
from textwrap import dedent
# Used since this can maintain comments
@@ -89,6 +88,7 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
def configure(
@@ -152,13 +152,11 @@ def configure(
util.write_file(server_cfg, contents.getvalue(), mode=0o644)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
# If there isn't a mcollective key in the configuration don't do anything
if "mcollective" not in cfg:
- log.debug(
+ LOG.debug(
"Skipping module named %s, no 'mcollective' key in configuration",
name,
)
diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py
index 956a9478..13a0f00a 100644
--- a/cloudinit/config/cc_migrator.py
+++ b/cloudinit/config/cc_migrator.py
@@ -6,9 +6,9 @@
"""Migrator: Migrate old versions of cloud-init data to new"""
+import logging
import os
import shutil
-from logging import Logger
from cloudinit import helpers, util
from cloudinit.cloud import Cloud
@@ -39,6 +39,7 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
def _migrate_canon_sems(cloud):
@@ -59,7 +60,7 @@ def _migrate_canon_sems(cloud):
return am_adjusted
-def _migrate_legacy_sems(cloud, log):
+def _migrate_legacy_sems(cloud):
legacy_adjust = {
"apt-update-upgrade": [
"apt-configure",
@@ -82,25 +83,23 @@ def _migrate_legacy_sems(cloud, log):
util.del_file(os.path.join(sem_path, p))
(_name, freq) = os.path.splitext(p)
for m in migrate_to:
- log.debug(
+ LOG.debug(
"Migrating %s => %s with the same frequency", p, m
)
with sem_helper.lock(m, freq):
pass
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
do_migrate = util.get_cfg_option_str(cfg, "migrate", True)
if not util.translate_bool(do_migrate):
- log.debug("Skipping module named %s, migration disabled", name)
+ LOG.debug("Skipping module named %s, migration disabled", name)
return
sems_moved = _migrate_canon_sems(cloud)
- log.debug(
+ LOG.debug(
"Migrated %s semaphore files to there canonicalized names", sems_moved
)
- _migrate_legacy_sems(cloud, log)
+ _migrate_legacy_sems(cloud)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index db7a7c26..cd26a4ee 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -12,7 +12,6 @@ import logging
import math
import os
import re
-from logging import Logger
from string import whitespace
from textwrap import dedent
@@ -163,15 +162,15 @@ def _is_block_device(device_path, partition_path=None):
return os.path.exists(sys_path)
-def sanitize_devname(startname, transformer, log, aliases=None):
- log.debug("Attempting to determine the real name of %s", startname)
+def sanitize_devname(startname, transformer, aliases=None):
+ LOG.debug("Attempting to determine the real name of %s", startname)
# workaround, allow user to specify 'ephemeral'
# rather than more ec2 correct 'ephemeral0'
devname = startname
if devname == "ephemeral":
devname = "ephemeral0"
- log.debug("Adjusted mount option from ephemeral to ephemeral0")
+ LOG.debug("Adjusted mount option from ephemeral to ephemeral0")
if is_network_device(startname):
return startname
@@ -182,7 +181,7 @@ def sanitize_devname(startname, transformer, log, aliases=None):
if aliases:
device_path = aliases.get(device_path, device_path)
if orig != device_path:
- log.debug("Mapped device alias %s to %s", orig, device_path)
+ LOG.debug("Mapped device alias %s to %s", orig, device_path)
if is_meta_device_name(device_path):
device_path = transformer(device_path)
@@ -190,7 +189,7 @@ def sanitize_devname(startname, transformer, log, aliases=None):
return None
if not device_path.startswith("/"):
device_path = "/dev/%s" % (device_path,)
- log.debug("Mapped metadata name %s to %s", orig, device_path)
+ LOG.debug("Mapped metadata name %s to %s", orig, device_path)
else:
if DEVICE_NAME_RE.match(startname):
device_path = "/dev/%s" % (device_path,)
@@ -407,9 +406,7 @@ def handle_swapcfg(swapcfg):
return None
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
# fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno
def_mnt_opts = "defaults,nobootwait"
uses_systemd = cloud.distro.uses_systemd()
@@ -455,7 +452,7 @@ def handle(
for i in range(len(cfgmnt)):
# skip something that wasn't a list
if not isinstance(cfgmnt[i], list):
- log.warning(
+ LOG.warning(
"Mount option %s not a list, got a %s instead",
(i + 1),
type_utils.obj_name(cfgmnt[i]),
@@ -464,16 +461,16 @@ def handle(
start = str(cfgmnt[i][0])
sanitized = sanitize_devname(
- start, cloud.device_name_to_device, log, aliases=device_aliases
+ start, cloud.device_name_to_device, aliases=device_aliases
)
if sanitized != start:
- log.debug("changed %s => %s" % (start, sanitized))
+ LOG.debug("changed %s => %s", start, sanitized)
if sanitized is None:
- log.debug("Ignoring nonexistent named mount %s", start)
+ LOG.debug("Ignoring nonexistent named mount %s", start)
continue
elif sanitized in fstab_devs:
- log.info(
+ LOG.info(
"Device %s already defined in fstab: %s",
sanitized,
fstab_devs[sanitized],
@@ -511,16 +508,16 @@ def handle(
for defmnt in defmnts:
start = defmnt[0]
sanitized = sanitize_devname(
- start, cloud.device_name_to_device, log, aliases=device_aliases
+ start, cloud.device_name_to_device, aliases=device_aliases
)
if sanitized != start:
- log.debug("changed default device %s => %s" % (start, sanitized))
+ LOG.debug("changed default device %s => %s", start, sanitized)
if sanitized is None:
- log.debug("Ignoring nonexistent default named mount %s", start)
+ LOG.debug("Ignoring nonexistent default named mount %s", start)
continue
elif sanitized in fstab_devs:
- log.debug(
+ LOG.debug(
"Device %s already defined in fstab: %s",
sanitized,
fstab_devs[sanitized],
@@ -536,7 +533,7 @@ def handle(
break
if cfgmnt_has:
- log.debug("Not including %s, already previously included", start)
+ LOG.debug("Not including %s, already previously included", start)
continue
cfgmnt.append(defmnt)
@@ -545,7 +542,7 @@ def handle(
actlist = []
for x in cfgmnt:
if x[1] is None:
- log.debug("Skipping nonexistent device named %s", x[0])
+ LOG.debug("Skipping nonexistent device named %s", x[0])
else:
actlist.append(x)
@@ -554,7 +551,7 @@ def handle(
actlist.append([swapret, "none", "swap", "sw", "0", "0"])
if len(actlist) == 0:
- log.debug("No modifications to fstab needed")
+ LOG.debug("No modifications to fstab needed")
return
cc_lines = []
@@ -577,7 +574,7 @@ def handle(
try:
util.ensure_dir(d)
except Exception:
- util.logexc(log, "Failed to make '%s' config-mount", d)
+ util.logexc(LOG, "Failed to make '%s' config-mount", d)
# dirs is list of directories on which a volume should be mounted.
# If any of them does not already show up in the list of current
# mount points, we will definitely need to do mount -a.
@@ -600,9 +597,9 @@ def handle(
activate_cmds.append(["swapon", "-a"])
if len(sops) == 0:
- log.debug("No changes to /etc/fstab made.")
+ LOG.debug("No changes to /etc/fstab made.")
else:
- log.debug("Changes to fstab: %s", sops)
+ LOG.debug("Changes to fstab: %s", sops)
need_mount_all = True
if need_mount_all:
@@ -615,10 +612,10 @@ def handle(
fmt = "Activate mounts: %s:" + " ".join(cmd)
try:
subp.subp(cmd)
- log.debug(fmt, "PASS")
+ LOG.debug(fmt, "PASS")
except subp.ProcessExecutionError:
- log.warning(fmt, "FAIL")
- util.logexc(log, fmt, "FAIL")
+ LOG.warning(fmt, "FAIL")
+ util.logexc(LOG, fmt, "FAIL")
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index b5620f37..47659af7 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -8,7 +8,6 @@
import copy
import os
-from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
@@ -548,9 +547,7 @@ def supplemental_schema_validation(ntp_config):
)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
"""Enable and configure ntp."""
if "ntp" not in cfg:
LOG.debug(
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
index 7d346a19..7935c33a 100644
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -8,7 +8,6 @@
import os
import time
-from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
@@ -60,6 +59,7 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
def _multi_cfg_bool_get(cfg, *keys):
@@ -69,7 +69,7 @@ def _multi_cfg_bool_get(cfg, *keys):
return False
-def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
+def _fire_reboot(wait_attempts=6, initial_sleep=1, backoff=2):
subp.subp(REBOOT_CMD)
start = time.time()
wait_time = initial_sleep
@@ -77,7 +77,7 @@ def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
time.sleep(wait_time)
wait_time *= backoff
elapsed = time.time() - start
- log.debug("Rebooted, but still running after %s seconds", int(elapsed))
+ LOG.debug("Rebooted, but still running after %s seconds", int(elapsed))
# If we got here, not good
elapsed = time.time() - start
raise RuntimeError(
@@ -85,9 +85,7 @@ def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
# Handle the old style + new config names
update = _multi_cfg_bool_get(cfg, "apt_update", "package_update")
upgrade = _multi_cfg_bool_get(cfg, "package_upgrade", "apt_upgrade")
@@ -101,21 +99,21 @@ def handle(
try:
cloud.distro.update_package_sources()
except Exception as e:
- util.logexc(log, "Package update failed")
+ util.logexc(LOG, "Package update failed")
errors.append(e)
if upgrade:
try:
cloud.distro.package_command("upgrade")
except Exception as e:
- util.logexc(log, "Package upgrade failed")
+ util.logexc(LOG, "Package upgrade failed")
errors.append(e)
if len(pkglist):
try:
cloud.distro.install_packages(pkglist)
except Exception as e:
- util.logexc(log, "Failed to install packages: %s", pkglist)
+ util.logexc(LOG, "Failed to install packages: %s", pkglist)
errors.append(e)
# TODO(smoser): handle this less violently
@@ -125,18 +123,18 @@ def handle(
reboot_fn_exists = os.path.isfile(REBOOT_FILE)
if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists:
try:
- log.warning(
+ LOG.warning(
"Rebooting after upgrade or install per %s", REBOOT_FILE
)
# Flush the above warning + anything else out...
- logging.flushLoggers(log)
- _fire_reboot(log)
+ logging.flushLoggers(LOG)
+ _fire_reboot()
except Exception as e:
- util.logexc(log, "Requested reboot did not happen!")
+ util.logexc(LOG, "Requested reboot did not happen!")
errors.append(e)
if len(errors):
- log.warning(
+ LOG.warning(
"%s failed with exceptions, re-raising the last one", len(errors)
)
raise errors[-1]
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 7bbee5af..a0589404 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -8,7 +8,7 @@
"""Phone Home: Post data to url"""
-from logging import Logger
+import logging
from textwrap import dedent
from cloudinit import templater, url_helper, util
@@ -95,7 +95,7 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
-
+LOG = logging.getLogger(__name__)
# phone_home:
# url: http://my.foo.bar/$INSTANCE/
# post: all
@@ -108,14 +108,12 @@ __doc__ = get_meta_doc(meta)
#
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if len(args) != 0:
ph_cfg = util.read_conf(args[0])
else:
if "phone_home" not in cfg:
- log.debug(
+ LOG.debug(
"Skipping module named %s, "
"no 'phone_home' configuration found",
name,
@@ -124,7 +122,7 @@ def handle(
ph_cfg = cfg["phone_home"]
if "url" not in ph_cfg:
- log.warning(
+ LOG.warning(
"Skipping module named %s, "
"no 'url' found in 'phone_home' configuration",
name,
@@ -139,7 +137,7 @@ def handle(
except (ValueError, TypeError):
tries = 10
util.logexc(
- log,
+ LOG,
"Configuration entry 'tries' is not an integer, using %s instead",
tries,
)
@@ -165,7 +163,7 @@ def handle(
all_keys[n] = util.load_file(path)
except Exception:
util.logexc(
- log, "%s: failed to open, can not phone home that data!", path
+ LOG, "%s: failed to open, can not phone home that data!", path
)
submit_keys = {}
@@ -174,7 +172,7 @@ def handle(
submit_keys[k] = all_keys[k]
else:
submit_keys[k] = None
- log.warning(
+ LOG.warning(
"Requested key %s from 'post'"
" configuration list not available",
k,
@@ -203,7 +201,7 @@ def handle(
)
except Exception:
util.logexc(
- log, "Failed to post phone home data to %s in %s tries", url, tries
+ LOG, "Failed to post phone home data to %s in %s tries", url, tries
)
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 1eb63d78..1c2df860 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -7,11 +7,11 @@
"""Power State Change: Change power state"""
import errno
+import logging
import os
import re
import subprocess
import time
-from logging import Logger
from textwrap import dedent
from cloudinit import subp, util
@@ -79,6 +79,7 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
def givecmdline(pid):
@@ -99,10 +100,9 @@ def givecmdline(pid):
return None
-def check_condition(cond, log=None):
+def check_condition(cond):
if isinstance(cond, bool):
- if log:
- log.debug("Static Condition: %s" % cond)
+ LOG.debug("Static Condition: %s", cond)
return cond
pre = "check_condition command (%s): " % cond
@@ -111,58 +111,49 @@ def check_condition(cond, log=None):
proc.communicate()
ret = proc.returncode
if ret == 0:
- if log:
- log.debug(pre + "exited 0. condition met.")
+ LOG.debug("%sexited 0. condition met.", pre)
return True
elif ret == 1:
- if log:
- log.debug(pre + "exited 1. condition not met.")
+ LOG.debug("%sexited 1. condition not met.", pre)
return False
else:
- if log:
- log.warning(
- pre + "unexpected exit %s. " % ret + "do not apply change."
- )
+ LOG.warning("%sunexpected exit %s. do not apply change.", pre, ret)
return False
except Exception as e:
- if log:
- log.warning(pre + "Unexpected error: %s" % e)
+ LOG.warning("%sUnexpected error: %s", pre, e)
return False
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
try:
(args, timeout, condition) = load_power_state(cfg, cloud.distro)
if args is None:
- log.debug("no power_state provided. doing nothing")
+ LOG.debug("no power_state provided. doing nothing")
return
except Exception as e:
- log.warning("%s Not performing power state change!" % str(e))
+ LOG.warning("%s Not performing power state change!", str(e))
return
if condition is False:
- log.debug("Condition was false. Will not perform state change.")
+ LOG.debug("Condition was false. Will not perform state change.")
return
mypid = os.getpid()
cmdline = givecmdline(mypid)
if not cmdline:
- log.warning("power_state: failed to get cmdline of current process")
+ LOG.warning("power_state: failed to get cmdline of current process")
return
devnull_fp = open(os.devnull, "w")
- log.debug("After pid %s ends, will execute: %s" % (mypid, " ".join(args)))
+ LOG.debug("After pid %s ends, will execute: %s", mypid, " ".join(args))
util.fork_cb(
run_after_pid_gone,
mypid,
cmdline,
timeout,
- log,
condition,
execmd,
[args, devnull_fp],
@@ -227,7 +218,7 @@ def execmd(exe_args, output=None, data_in=None):
doexit(ret)
-def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args):
+def run_after_pid_gone(pid, pidcmdline, timeout, condition, func, args):
# wait until pid, with /proc/pid/cmdline contents of pidcmdline
# is no longer alive. After it is gone, or timeout has passed
# execute func(args)
@@ -235,8 +226,7 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args):
end_time = time.time() + timeout
def fatal(msg):
- if log:
- log.warning(msg)
+ LOG.warning(msg)
doexit(EXIT_FAIL)
known_errnos = (errno.ENOENT, errno.ESRCH)
@@ -267,11 +257,10 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args):
if not msg:
fatal("Unexpected error in run_after_pid_gone")
- if log:
- log.debug(msg)
+ LOG.debug(msg)
try:
- if not check_condition(condition, log):
+ if not check_condition(condition):
return
except Exception as e:
fatal("Unexpected Exception when checking condition: %s" % e)
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 38c2cc99..a3a2a4cf 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -8,10 +8,10 @@
"""Puppet: Install, configure and start puppet"""
+import logging
import os
import socket
from io import StringIO
-from logging import Logger
from textwrap import dedent
import yaml
@@ -107,10 +107,15 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
+
class PuppetConstants:
def __init__(
- self, puppet_conf_file, puppet_ssl_dir, csr_attributes_path, log
+ self,
+ puppet_conf_file,
+ puppet_ssl_dir,
+ csr_attributes_path,
):
self.conf_path = puppet_conf_file
self.ssl_dir = puppet_ssl_dir
@@ -119,7 +124,7 @@ class PuppetConstants:
self.csr_attributes_path = csr_attributes_path
-def _manage_puppet_services(log, cloud: Cloud, action: str):
+def _manage_puppet_services(cloud: Cloud, action: str):
"""Attempts to perform action on one of the puppet services"""
service_managed: str = ""
for puppet_name in PUPPET_PACKAGE_NAMES:
@@ -130,7 +135,7 @@ def _manage_puppet_services(log, cloud: Cloud, action: str):
except subp.ProcessExecutionError:
pass
if not service_managed:
- log.warning(
+ LOG.warning(
"Could not '%s' any of the following services: %s",
action,
", ".join(PUPPET_PACKAGE_NAMES),
@@ -182,12 +187,10 @@ def install_puppet_aio(
return subp.subp([tmpf] + args, capture=False)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
# If there isn't a puppet key in the configuration don't do anything
if "puppet" not in cfg:
- log.debug(
+ LOG.debug(
"Skipping module named %s, no 'puppet' configuration found", name
)
return
@@ -223,11 +226,11 @@ def handle(
puppet_cfg, "package_name", puppet_package
)
if not install and version:
- log.warning(
+ LOG.warning(
"Puppet install set to false but version supplied, doing nothing."
)
elif install:
- log.debug(
+ LOG.debug(
"Attempting to install puppet %s from %s",
version if version else "latest",
install_type,
@@ -243,7 +246,7 @@ def handle(
except subp.ProcessExecutionError:
pass
if not package_name:
- log.warning(
+ LOG.warning(
"No installable puppet package in any of: %s",
", ".join(PUPPET_PACKAGE_NAMES),
)
@@ -255,7 +258,7 @@ def handle(
cloud.distro, aio_install_url, version, collection, cleanup
)
else:
- log.warning("Unknown puppet install type '%s'", install_type)
+ LOG.warning("Unknown puppet install type '%s'", install_type)
run = False
conf_file = util.get_cfg_option_str(
@@ -270,7 +273,7 @@ def handle(
get_config_value(puppet_bin, "csr_attributes"),
)
- p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log)
+ p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path)
# ... and then update the puppet configuration
if "conf" in puppet_cfg:
@@ -329,11 +332,11 @@ def handle(
if start_puppetd:
# Enables the services
- _manage_puppet_services(log, cloud, "enable")
+ _manage_puppet_services(cloud, "enable")
# Run the agent if needed
if run:
- log.debug("Running puppet-agent")
+ LOG.debug("Running puppet-agent")
cmd = [puppet_bin, "agent"]
if "exec_args" in puppet_cfg:
cmd_args = puppet_cfg["exec_args"]
@@ -342,7 +345,7 @@ def handle(
elif isinstance(cmd_args, str):
cmd.extend(cmd_args.split())
else:
- log.warning(
+ LOG.warning(
"Unknown type %s provided for puppet"
" 'exec_args' expected list, tuple,"
" or string",
@@ -355,7 +358,7 @@ def handle(
if start_puppetd:
# Start puppetd
- _manage_puppet_services(log, cloud, "start")
+ _manage_puppet_services(cloud, "start")
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py
index 18c22476..28cf343d 100644
--- a/cloudinit/config/cc_refresh_rmc_and_interface.py
+++ b/cloudinit/config/cc_refresh_rmc_and_interface.py
@@ -8,7 +8,6 @@
Ensure Network Manager is not managing IPv6 interface"""
import errno
-from logging import Logger
from cloudinit import log as logging
from cloudinit import netinfo, subp, util
@@ -57,9 +56,7 @@ LOG = logging.getLogger(__name__)
RMCCTRL = "rmcctrl"
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if not subp.which(RMCCTRL):
LOG.debug("No '%s' in path, disabled", RMCCTRL)
return
@@ -150,20 +147,3 @@ def search(contents):
or contents.startswith("IPV6INIT")
or contents.startswith("NM_CONTROLLED")
)
-
-
-def refresh_rmc():
- # To make a healthy connection between RMC daemon and hypervisor we
- # refresh RMC. With refreshing RMC we are ensuring that making IPv6
- # down and up shouldn't impact communication between RMC daemon and
- # hypervisor.
- # -z : stop Resource Monitoring & Control subsystem and all resource
- # managers, but the command does not return control to the user
- # until the subsystem and all resource managers are stopped.
- # -s : start Resource Monitoring & Control subsystem.
- try:
- subp.subp([RMCCTRL, "-z"])
- subp.subp([RMCCTRL, "-s"])
- except Exception:
- util.logexc(LOG, "Failed to refresh the RMC subsystem.")
- raise
diff --git a/cloudinit/config/cc_reset_rmc.py b/cloudinit/config/cc_reset_rmc.py
index a780e4ff..10f4b7ea 100644
--- a/cloudinit/config/cc_reset_rmc.py
+++ b/cloudinit/config/cc_reset_rmc.py
@@ -7,7 +7,6 @@
import os
-from logging import Logger
from cloudinit import log as logging
from cloudinit import subp, util
@@ -65,9 +64,7 @@ LOG = logging.getLogger(__name__)
NODE_ID_FILE = "/etc/ct_node_id"
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
# Ensuring node id has to be generated only once during first boot
if cloud.datasource.platform_type == "none":
LOG.debug("Skipping creation of new ct_node_id node")
@@ -149,4 +146,4 @@ def reset_rmc():
if node_id_after == node_id_before:
msg = "New node ID did not get generated."
LOG.error(msg)
- raise Exception(msg)
+ raise RuntimeError(msg)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 0e6197a2..1ef0f475 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -9,9 +9,9 @@
"""Resizefs: cloud-config module which resizes the filesystem"""
import errno
+import logging
import os
import stat
-from logging import Logger
from textwrap import dedent
from cloudinit import subp, util
@@ -50,6 +50,8 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
+
def _resize_btrfs(mount_point, devpth):
# If "/" is ro resize will fail. However it should be allowed since resize
@@ -146,7 +148,7 @@ def can_skip_resize(fs_type, resize_what, devpth):
return False
-def maybe_get_writable_device_path(devpath, info, log):
+def maybe_get_writable_device_path(devpath, info):
"""Return updated devpath if the devpath is a writable block device.
@param devpath: Requested path to the root device we want to resize.
@@ -166,25 +168,25 @@ def maybe_get_writable_device_path(devpath, info, log):
):
devpath = util.rootdev_from_cmdline(util.get_cmdline())
if devpath is None:
- log.warning("Unable to find device '/dev/root'")
+ LOG.warning("Unable to find device '/dev/root'")
return None
- log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath)
+ LOG.debug("Converted /dev/root to '%s' per kernel cmdline", devpath)
if devpath == "overlayroot":
- log.debug("Not attempting to resize devpath '%s': %s", devpath, info)
+ LOG.debug("Not attempting to resize devpath '%s': %s", devpath, info)
return None
# FreeBSD zpool can also just use gpt/<label>
# with that in mind we can not do an os.stat on "gpt/whatever"
# therefore return the devpath already here.
if devpath.startswith("gpt/"):
- log.debug("We have a gpt label - just go ahead")
+ LOG.debug("We have a gpt label - just go ahead")
return devpath
# Alternatively, our device could simply be a name as returned by gpart,
# such as da0p3
if not devpath.startswith("/dev/") and not os.path.exists(devpath):
fulldevpath = "/dev/" + devpath.lstrip("/")
- log.debug(
+ LOG.debug(
"'%s' doesn't appear to be a valid device path. Trying '%s'",
devpath,
fulldevpath,
@@ -195,13 +197,13 @@ def maybe_get_writable_device_path(devpath, info, log):
statret = os.stat(devpath)
except OSError as exc:
if container and exc.errno == errno.ENOENT:
- log.debug(
+ LOG.debug(
"Device '%s' did not exist in container. cannot resize: %s",
devpath,
info,
)
elif exc.errno == errno.ENOENT:
- log.warning(
+ LOG.warning(
"Device '%s' did not exist. cannot resize: %s", devpath, info
)
else:
@@ -210,35 +212,36 @@ def maybe_get_writable_device_path(devpath, info, log):
if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
if container:
- log.debug(
+ LOG.debug(
"device '%s' not a block device in container."
- " cannot resize: %s" % (devpath, info)
+ " cannot resize: %s",
+ devpath,
+ info,
)
else:
- log.warning(
- "device '%s' not a block device. cannot resize: %s"
- % (devpath, info)
+ LOG.warning(
+ "device '%s' not a block device. cannot resize: %s",
+ devpath,
+ info,
)
return None
return devpath # The writable block devpath
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if len(args) != 0:
resize_root = args[0]
else:
resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
if not util.translate_bool(resize_root, addons=[NOBLOCK]):
- log.debug("Skipping module named %s, resizing disabled", name)
+ LOG.debug("Skipping module named %s, resizing disabled", name)
return
# TODO(harlowja): allow what is to be resized to be configurable??
resize_what = "/"
- result = util.get_mount_info(resize_what, log)
+ result = util.get_mount_info(resize_what, LOG)
if not result:
- log.warning("Could not determine filesystem type of %s", resize_what)
+ LOG.warning("Could not determine filesystem type of %s", resize_what)
return
(devpth, fs_type, mount_point) = result
@@ -256,15 +259,15 @@ def handle(
resize_what = zpool
info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
- log.debug("resize_info: %s" % info)
+ LOG.debug("resize_info: %s", info)
- devpth = maybe_get_writable_device_path(devpth, info, log)
+ devpth = maybe_get_writable_device_path(devpth, info)
if not devpth:
return # devpath was not a writable block device
resizer = None
if can_skip_resize(fs_type, resize_what, devpth):
- log.debug(
+ LOG.debug(
"Skip resize filesystem type %s for %s", fs_type, resize_what
)
return
@@ -276,7 +279,7 @@ def handle(
break
if not resizer:
- log.warning(
+ LOG.warning(
"Not resizing unknown filesystem type %s for %s",
fs_type,
resize_what,
@@ -284,7 +287,7 @@ def handle(
return
resize_cmd = resizer(resize_what, devpth)
- log.debug(
+ LOG.debug(
"Resizing %s (%s) using %s", resize_what, fs_type, " ".join(resize_cmd)
)
@@ -293,32 +296,32 @@ def handle(
# the resize command
util.fork_cb(
util.log_time,
- logfunc=log.debug,
+ logfunc=LOG.debug,
msg="backgrounded Resizing",
func=do_resize,
- args=(resize_cmd, log),
+ args=(resize_cmd),
)
else:
util.log_time(
- logfunc=log.debug,
+ logfunc=LOG.debug,
msg="Resizing",
func=do_resize,
- args=(resize_cmd, log),
+ args=(resize_cmd),
)
action = "Resized"
if resize_root == NOBLOCK:
action = "Resizing (via forking)"
- log.debug(
+ LOG.debug(
"%s root filesystem (type=%s, val=%s)", action, fs_type, resize_root
)
-def do_resize(resize_cmd, log):
+def do_resize(resize_cmd):
try:
subp.subp(resize_cmd)
except subp.ProcessExecutionError:
- util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd)
+ util.logexc(LOG, "Failed to resize filesystem (cmd=%s)", resize_cmd)
raise
# TODO(harlowja): Should we add a fsck check after this to make
# sure we didn't corrupt anything?
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 4629ca7d..d8a7bfa0 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -8,7 +8,6 @@
"""Resolv Conf: configure resolv.conf"""
-from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
@@ -122,9 +121,7 @@ def generate_resolv_conf(template_fn, params, target_fname):
templater.render_to_file(template_fn, target_fname, params)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
"""
Handler for resolv.conf
@@ -135,7 +132,7 @@ def handle(
@param args: Any module arguments from cloud.cfg
"""
if "manage_resolv_conf" not in cfg:
- log.debug(
+ LOG.debug(
"Skipping module named %s,"
" no 'manage_resolv_conf' key in configuration",
name,
@@ -143,7 +140,7 @@ def handle(
return
if not util.get_cfg_option_bool(cfg, "manage_resolv_conf", False):
- log.debug(
+ LOG.debug(
"Skipping module named %s,"
" 'manage_resolv_conf' present but set to False",
name,
@@ -151,7 +148,7 @@ def handle(
return
if "resolv_conf" not in cfg:
- log.warning("manage_resolv_conf True but no parameters provided!")
+ LOG.warning("manage_resolv_conf True but no parameters provided!")
return
try:
@@ -159,7 +156,7 @@ def handle(
RESOLVE_CONFIG_TEMPLATE_MAP[cloud.distro.resolve_conf_fn]
)
except KeyError:
- log.warning("No template found, not rendering resolve configs")
+ LOG.warning("No template found, not rendering resolve configs")
return
generate_resolv_conf(
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index ce88ec65..533862a9 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -5,7 +5,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Red Hat Subscription: Register Red Hat Enterprise Linux based system"""
-from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
@@ -80,12 +79,10 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
- sm = SubscriptionManager(cfg, log=log)
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
+ sm = SubscriptionManager(cfg, log=LOG)
if not sm.is_configured():
- log.debug("%s: module not configured.", name)
+ LOG.debug("%s: module not configured.", name)
return None
if not sm.is_registered():
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index 9e84032a..729b30af 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -6,8 +6,8 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+import logging
import os
-from logging import Logger
from urllib.parse import parse_qs
from cloudinit import url_helper as uhelp
@@ -51,6 +51,7 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
#
# The purpose of this script is to allow cloud-init to consume
@@ -70,19 +71,17 @@ __doc__ = get_meta_doc(meta)
#
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
get_userdata_raw = getattr(cloud, "get_userdata_raw", None)
if not get_userdata_raw or not callable(get_userdata_raw):
- log.debug("Failed to get raw userdata in module %s", name)
+ LOG.debug("Failed to get raw userdata in module %s", name)
return
ud = get_userdata_raw()
try:
mdict = parse_qs(ud)
if not mdict or MY_HOOKNAME not in mdict:
- log.debug(
+ LOG.debug(
"Skipping module %s, did not find %s in parsed raw userdata",
name,
MY_HOOKNAME,
@@ -90,7 +89,7 @@ def handle(
return
except Exception:
util.logexc(
- log, "Failed to parse query string %s into a dictionary", ud
+ LOG, "Failed to parse query string %s into a dictionary", ud
)
raise
@@ -113,18 +112,18 @@ def handle(
except Exception as e:
captured_excps.append(e)
util.logexc(
- log, "%s failed to read %s and write %s", MY_NAME, url, fname
+ LOG, "%s failed to read %s and write %s", MY_NAME, url, fname
)
if wrote_fns:
- log.debug("Wrote out rightscale userdata to %s files", len(wrote_fns))
+ LOG.debug("Wrote out rightscale userdata to %s files", len(wrote_fns))
if len(wrote_fns) != len(urls):
skipped = len(urls) - len(wrote_fns)
- log.debug("%s urls were skipped or failed", skipped)
+ LOG.debug("%s urls were skipped or failed", skipped)
if captured_excps:
- log.warning(
+ LOG.warning(
"%s failed with exceptions, re-raising the last one",
len(captured_excps),
)
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 9baaf094..9ecefa05 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -10,7 +10,6 @@
import os
import re
-from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
@@ -107,9 +106,9 @@ def load_config(cfg: dict) -> dict:
mycfg = cfg.get("rsyslog", {})
if isinstance(cfg.get("rsyslog"), list):
- LOG.warning(
- "DEPRECATION: This rsyslog list format is deprecated and will be "
- "removed in a future version of cloud-init. Use documented keys."
+ util.deprecate(
+ deprecated="The rsyslog key with value of type 'list'",
+ deprecated_version="22.2",
)
mycfg = {KEYNAME_CONFIGS: cfg.get("rsyslog")}
if KEYNAME_LEGACY_FILENAME in cfg:
@@ -297,11 +296,9 @@ def remotes_to_rsyslog_cfg(remotes, header=None, footer=None):
return "\n".join(lines) + "\n"
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if "rsyslog" not in cfg:
- log.debug(
+ LOG.debug(
"Skipping module named %s, no 'rsyslog' key in configuration", name
)
return
@@ -319,7 +316,7 @@ def handle(
)
if not mycfg["configs"]:
- log.debug("Empty config rsyslog['configs'], nothing to do")
+ LOG.debug("Empty config rsyslog['configs'], nothing to do")
return
changes = apply_rsyslog_changes(
@@ -329,14 +326,14 @@ def handle(
)
if not changes:
- log.debug("restart of syslog not necessary, no changes made")
+ LOG.debug("restart of syslog not necessary, no changes made")
return
try:
restarted = reload_syslog(cloud.distro, command=mycfg[KEYNAME_RELOAD])
except subp.ProcessExecutionError as e:
restarted = False
- log.warning("Failed to reload syslog", e)
+ LOG.warning("Failed to reload syslog %s", str(e))
if restarted:
# This only needs to run if we *actually* restarted
@@ -344,7 +341,7 @@ def handle(
cloud.cycle_logging()
# This should now use rsyslog if
# the logging was setup to use it...
- log.debug("%s configured %s files", name, changes)
+ LOG.debug("%s configured %s files", name, changes)
# vi: ts=4 expandtab syntax=python
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index 27c0429b..aacbfd12 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -8,8 +8,8 @@
"""Runcmd: run arbitrary commands at rc.local with output to the console"""
+import logging
import os
-from logging import Logger
from textwrap import dedent
from cloudinit import util
@@ -75,12 +75,12 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if "runcmd" not in cfg:
- log.debug(
+ LOG.debug(
"Skipping module named %s, no 'runcmd' key in configuration", name
)
return
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index f3a8c16c..27fddbbc 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -4,8 +4,8 @@
"""Salt Minion: Setup and run salt minion"""
+import logging
import os
-from logging import Logger
from textwrap import dedent
from cloudinit import safeyaml, subp, util
@@ -65,6 +65,7 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
# Note: see https://docs.saltstack.com/en/latest/topics/installation/
# Note: see https://docs.saltstack.com/en/latest/ref/configuration/
@@ -98,12 +99,10 @@ class SaltConstants:
)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
# If there isn't a salt key in the configuration don't do anything
if "salt_minion" not in cfg:
- log.debug(
+ LOG.debug(
"Skipping module named %s, no 'salt_minion' key in configuration",
name,
)
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
index 3e093d0e..30bd69d9 100644
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ b/cloudinit/config/cc_scripts_per_boot.py
@@ -7,8 +7,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Scripts Per Boot: Run per boot scripts"""
+import logging
import os
-from logging import Logger
from cloudinit import subp
from cloudinit.cloud import Cloud
@@ -37,20 +37,19 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
SCRIPT_SUBDIR = "per-boot"
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
# Comes from the following:
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), "scripts", SCRIPT_SUBDIR)
try:
subp.runparts(runparts_path)
except Exception:
- log.warning(
+ LOG.warning(
"Failed to run module %s (%s in %s)",
name,
SCRIPT_SUBDIR,
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
index 719b8a2a..583a0671 100644
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ b/cloudinit/config/cc_scripts_per_instance.py
@@ -7,8 +7,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Scripts Per Instance: Run per instance scripts"""
+import logging
import os
-from logging import Logger
from cloudinit import subp
from cloudinit.cloud import Cloud
@@ -38,21 +38,19 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
-
+LOG = logging.getLogger(__name__)
SCRIPT_SUBDIR = "per-instance"
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
# Comes from the following:
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), "scripts", SCRIPT_SUBDIR)
try:
subp.runparts(runparts_path)
except Exception:
- log.warning(
+ LOG.warning(
"Failed to run module %s (%s in %s)",
name,
SCRIPT_SUBDIR,
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
index 42aa89b3..9a6b86fa 100644
--- a/cloudinit/config/cc_scripts_per_once.py
+++ b/cloudinit/config/cc_scripts_per_once.py
@@ -7,8 +7,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Scripts Per Once: Run one time scripts"""
+import logging
import os
-from logging import Logger
from cloudinit import subp
from cloudinit.cloud import Cloud
@@ -37,20 +37,19 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
SCRIPT_SUBDIR = "per-once"
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
# Comes from the following:
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), "scripts", SCRIPT_SUBDIR)
try:
subp.runparts(runparts_path)
except Exception:
- log.warning(
+ LOG.warning(
"Failed to run module %s (%s in %s)",
name,
SCRIPT_SUBDIR,
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
index b6ae37f5..aaef65fe 100644
--- a/cloudinit/config/cc_scripts_user.py
+++ b/cloudinit/config/cc_scripts_user.py
@@ -7,8 +7,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Scripts User: Run user scripts"""
+import logging
import os
-from logging import Logger
from cloudinit import subp
from cloudinit.cloud import Cloud
@@ -38,14 +38,13 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
SCRIPT_SUBDIR = "scripts"
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
# This is written to by the user data handlers
# Ie, any custom shell scripts that come down
# go here...
@@ -53,7 +52,7 @@ def handle(
try:
subp.runparts(runparts_path)
except Exception:
- log.warning(
+ LOG.warning(
"Failed to run module %s (%s in %s)",
name,
SCRIPT_SUBDIR,
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
index b3ee9df1..674a3ded 100644
--- a/cloudinit/config/cc_scripts_vendor.py
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -5,8 +5,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Scripts Vendor: Run vendor scripts"""
+import logging
import os
-from logging import Logger
from textwrap import dedent
from cloudinit import subp, util
@@ -59,14 +59,13 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
SCRIPT_SUBDIR = "vendor"
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
# This is written to by the vendor data handlers
# any vendor data shell scripts get placed in runparts_path
runparts_path = os.path.join(
@@ -78,7 +77,7 @@ def handle(
try:
subp.runparts(runparts_path, exe_prefix=prefix)
except Exception:
- log.warning(
+ LOG.warning(
"Failed to run module %s (%s in %s)",
name,
SCRIPT_SUBDIR,
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index 1c1b81d8..0abf7957 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -11,7 +11,6 @@
import base64
import os
from io import BytesIO
-from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
@@ -110,9 +109,7 @@ def handle_random_seed_command(command, required, env=None):
subp.subp(command, env=env, capture=False)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
mycfg = cfg.get("random_seed", {})
seed_path = mycfg.get("file", "/dev/urandom")
seed_data = mycfg.get("data", b"")
@@ -129,7 +126,7 @@ def handle(
seed_data = seed_buf.getvalue()
if len(seed_data):
- log.debug(
+ LOG.debug(
"%s: adding %s bytes of random seed entropy to %s",
name,
len(seed_data),
@@ -144,7 +141,7 @@ def handle(
env["RANDOM_SEED_FILE"] = seed_path
handle_random_seed_command(command=command, required=req, env=env)
except ValueError as e:
- log.warning("handling random command [%s] failed: %s", command, e)
+ LOG.warning("handling random command [%s] failed: %s", command, e)
raise e
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index fa5c023c..106d0851 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -7,8 +7,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Set Hostname: Set hostname and FQDN"""
+import logging
import os
-from logging import Logger
from textwrap import dedent
from cloudinit import util
@@ -69,6 +69,7 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
class SetHostnameError(Exception):
@@ -79,11 +80,9 @@ class SetHostnameError(Exception):
"""
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug(
+ LOG.debug(
"Configuration option 'preserve_hostname' is set,"
" not setting the hostname in module %s",
name,
@@ -113,18 +112,18 @@ def handle(
"hostname"
) or fqdn != prev_hostname.get("fqdn")
if not hostname_changed:
- log.debug("No hostname changes. Skipping set-hostname")
+ LOG.debug("No hostname changes. Skipping set-hostname")
return
if is_default and hostname == "localhost":
# https://github.com/systemd/systemd/commit/d39079fcaa05e23540d2b1f0270fa31c22a7e9f1
- log.debug("Hostname is localhost. Let other services handle this.")
+ LOG.debug("Hostname is localhost. Let other services handle this.")
return
- log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
+ LOG.debug("Setting the hostname to %s (%s)", fqdn, hostname)
try:
cloud.distro.set_hostname(hostname, fqdn)
except Exception as e:
msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname)
- util.logexc(log, msg)
+ util.logexc(LOG, msg)
raise SetHostnameError("%s: %s" % (msg, e)) from e
write_json(prev_fn, {"hostname": hostname, "fqdn": fqdn})
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 3a0b3f5b..d3707bfe 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -8,7 +8,6 @@
"""Set Passwords: Set user passwords and enable/disable SSH password auth"""
import re
-from logging import Logger
from string import ascii_letters, digits
from textwrap import dedent
from typing import List
@@ -133,10 +132,10 @@ def handle_ssh_pwauth(pw_auth, distro: Distro):
cfg_name = "PasswordAuthentication"
if isinstance(pw_auth, str):
- LOG.warning(
- "DEPRECATION: The 'ssh_pwauth' config key should be set to "
- "a boolean value. The string format is deprecated and will be "
- "removed in a future version of cloud-init."
+ util.deprecate(
+ deprecated="Using a string value for the 'ssh_pwauth' key",
+ deprecated_version="22.2",
+ extra_message="Use a boolean value with 'ssh_pwauth'.",
)
if util.is_true(pw_auth):
cfg_val = "yes"
@@ -172,9 +171,7 @@ def handle_ssh_pwauth(pw_auth, distro: Distro):
_restart_ssh_daemon(distro, service)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
distro: Distro = cloud.distro
if args:
# if run from command line, and give args, wipe the chpasswd['list']
@@ -192,19 +189,21 @@ def handle(
chfg = cfg["chpasswd"]
users_list = util.get_cfg_option_list(chfg, "users", default=[])
if "list" in chfg and chfg["list"]:
- log.warning(
- "DEPRECATION: key 'lists' is now deprecated. Use 'users'."
+ util.deprecate(
+ deprecated="Config key 'lists'",
+ deprecated_version="22.3",
+ extra_message="Use 'users' instead.",
)
if isinstance(chfg["list"], list):
- log.debug("Handling input for chpasswd as list.")
+ LOG.debug("Handling input for chpasswd as list.")
plist = util.get_cfg_option_list(chfg, "list", plist)
else:
- log.warning(
- "DEPRECATION: The chpasswd multiline string format is "
- "deprecated and will be removed from a future version of "
- "cloud-init. Use the list format instead."
+ util.deprecate(
+ deprecated="The chpasswd multiline string",
+ deprecated_version="22.2",
+ extra_message="Use string type instead.",
)
- log.debug("Handling input for chpasswd as multiline string.")
+ LOG.debug("Handling input for chpasswd as multiline string.")
multiline = util.get_cfg_option_str(chfg, "list")
if multiline:
plist = multiline.splitlines()
@@ -217,7 +216,7 @@ def handle(
if user:
plist = ["%s:%s" % (user, password)]
else:
- log.warning("No default or defined user to change password for.")
+ LOG.warning("No default or defined user to change password for.")
errors = []
if plist or users_list:
@@ -257,22 +256,22 @@ def handle(
users.append(u)
if users:
try:
- log.debug("Changing password for %s:", users)
+ LOG.debug("Changing password for %s:", users)
distro.chpasswd(plist_in, hashed=False)
except Exception as e:
errors.append(e)
util.logexc(
- log, "Failed to set passwords with chpasswd for %s", users
+ LOG, "Failed to set passwords with chpasswd for %s", users
)
if hashed_users:
try:
- log.debug("Setting hashed password for %s:", hashed_users)
+ LOG.debug("Setting hashed password for %s:", hashed_users)
distro.chpasswd(hashed_plist_in, hashed=True)
except Exception as e:
errors.append(e)
util.logexc(
- log,
+ LOG,
"Failed to set hashed passwords with chpasswd for %s",
hashed_users,
)
@@ -297,14 +296,14 @@ def handle(
expired_users.append(u)
except Exception as e:
errors.append(e)
- util.logexc(log, "Failed to set 'expire' for %s", u)
+ util.logexc(LOG, "Failed to set 'expire' for %s", u)
if expired_users:
- log.debug("Expired passwords for: %s users", expired_users)
+ LOG.debug("Expired passwords for: %s users", expired_users)
handle_ssh_pwauth(cfg.get("ssh_pwauth"), distro)
if len(errors):
- log.debug("%s errors occurred, re-raising the last one", len(errors))
+ LOG.debug("%s errors occurred, re-raising the last one", len(errors))
raise errors[-1]
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 3bf25f1e..841dc06b 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -6,7 +6,6 @@
import os
import sys
-from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
@@ -186,9 +185,7 @@ def run_commands(commands):
raise RuntimeError(msg)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
cfgin = cfg.get("snap", {})
if not cfgin:
LOG.debug(
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
index add40c1c..c8249e61 100644
--- a/cloudinit/config/cc_spacewalk.py
+++ b/cloudinit/config/cc_spacewalk.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Spacewalk: Install and configure spacewalk"""
-from logging import Logger
+import logging
from textwrap import dedent
from cloudinit import subp
@@ -41,6 +41,7 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
distros = ["redhat", "fedora"]
@@ -67,15 +68,13 @@ def do_register(
profile_name,
ca_cert_path=def_ca_cert_path,
proxy=None,
- log=None,
activation_key=None,
):
- if log is not None:
- log.info(
- "Registering using `rhnreg_ks` profile '%s' into server '%s'",
- profile_name,
- server,
- )
+ LOG.info(
+ "Registering using `rhnreg_ks` profile '%s' into server '%s'",
+ profile_name,
+ server,
+ )
cmd = ["rhnreg_ks"]
cmd.extend(["--serverUrl", "https://%s/XMLRPC" % server])
cmd.extend(["--profilename", str(profile_name)])
@@ -88,11 +87,9 @@ def do_register(
subp.subp(cmd, capture=False)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if "spacewalk" not in cfg:
- log.debug(
+ LOG.debug(
"Skipping module named %s, no 'spacewalk' key in configuration",
name,
)
@@ -107,11 +104,10 @@ def handle(
spacewalk_server,
cloud.datasource.get_hostname(fqdn=True).hostname,
proxy=cfg.get("proxy"),
- log=log,
activation_key=cfg.get("activation_key"),
)
else:
- log.debug(
+ LOG.debug(
"Skipping module named %s, 'spacewalk/server' key"
" was not found in configuration",
name,
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 1ec889f3..57129776 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -8,10 +8,10 @@
"""SSH: Configure SSH and SSH keys"""
import glob
+import logging
import os
import re
import sys
-from logging import Logger
from textwrap import dedent
from typing import List, Optional, Sequence
@@ -170,6 +170,7 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
GENERATE_KEY_NAMES = ["rsa", "dsa", "ecdsa", "ed25519"]
pattern_unsupported_config_keys = re.compile(
@@ -196,9 +197,7 @@ for k in GENERATE_KEY_NAMES:
KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
# remove the static keys from the pristine image
if cfg.get("ssh_deletekeys", True):
@@ -207,7 +206,7 @@ def handle(
try:
util.del_file(f)
except Exception:
- util.logexc(log, "Failed deleting key file %s", f)
+ util.logexc(LOG, "Failed deleting key file %s", f)
if "ssh_keys" in cfg:
# if there are keys and/or certificates in cloud-config, use them
@@ -218,7 +217,7 @@ def handle(
reason = "unsupported"
else:
reason = "unrecognized"
- log.warning('Skipping %s ssh_keys entry: "%s"', reason, key)
+ LOG.warning('Skipping %s ssh_keys entry: "%s"', reason, key)
continue
tgt_fn = CONFIG_KEY_TO_FILE[key][0]
tgt_perms = CONFIG_KEY_TO_FILE[key][1]
@@ -245,12 +244,12 @@ def handle(
# TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
subp.subp(cmd, capture=False)
- log.debug(
- f"Generated a key for {public_file} from {private_file}"
+ LOG.debug(
+ "Generated a key for %s from %s", public_file, private_file
)
except Exception:
util.logexc(
- log,
+ LOG,
"Failed generating a key for "
f"{public_file} from {private_file}",
)
@@ -288,10 +287,10 @@ def handle(
if e.exit_code == 1 and err.lower().startswith(
"unknown key"
):
- log.debug("ssh-keygen: unknown key type '%s'", keytype)
+ LOG.debug("ssh-keygen: unknown key type '%s'", keytype)
else:
util.logexc(
- log,
+ LOG,
"Failed generating key type %s to file %s",
keytype,
keyfile,
@@ -315,7 +314,7 @@ def handle(
try:
cloud.datasource.publish_host_keys(hostkeys)
except Exception:
- util.logexc(log, "Publishing host keys failed!")
+ util.logexc(LOG, "Publishing host keys failed!")
try:
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
@@ -329,7 +328,7 @@ def handle(
if util.get_cfg_option_bool(cfg, "allow_public_ssh_keys", True):
keys = cloud.get_public_ssh_keys() or []
else:
- log.debug(
+ LOG.debug(
"Skipping import of publish SSH keys per "
"config setting: allow_public_ssh_keys=False"
)
@@ -340,7 +339,7 @@ def handle(
apply_credentials(keys, user, disable_root, disable_root_opts)
except Exception:
- util.logexc(log, "Applying SSH credentials failed!")
+ util.logexc(LOG, "Applying SSH credentials failed!")
def apply_credentials(keys, user, disable_root, disable_root_opts):
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 4b4c3d60..0134b15a 100644
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -7,7 +7,7 @@
import base64
import hashlib
-from logging import Logger
+import logging
from cloudinit import ssh_util, util
from cloudinit.cloud import Cloud
@@ -38,6 +38,7 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
def _split_hash(bin_hash):
@@ -115,11 +116,9 @@ def _pprint_key_entries(
)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if util.is_true(cfg.get("no_ssh_fingerprints", False)):
- log.debug(
+ LOG.debug(
"Skipping module named %s, logging of SSH fingerprints disabled",
name,
)
@@ -129,7 +128,7 @@ def handle(
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
for (user_name, _cfg) in users.items():
if _cfg.get("no_create_home") or _cfg.get("system"):
- log.debug(
+ LOG.debug(
"Skipping printing of ssh fingerprints for user '%s' because "
"no home directory is created",
user_name,
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index ed5ac492..6c9a28e4 100644
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -7,8 +7,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""SSH Import ID: Import SSH id"""
+import logging
import pwd
-from logging import Logger
from textwrap import dedent
from cloudinit import subp, util
@@ -50,20 +50,19 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if not is_key_in_nested_dict(cfg, "ssh_import_id"):
- log.debug(
+ LOG.debug(
"Skipping module named ssh-import-id, no 'ssh_import_id'"
" directives found."
)
return
elif not subp.which(SSH_IMPORT_ID_BINARY):
- log.warning(
+ LOG.warning(
"ssh-import-id is not installed, but module ssh_import_id is "
"configured. Skipping module."
)
@@ -76,7 +75,7 @@ def handle(
if len(args) > 1:
ids = args[1:]
- import_ssh_ids(ids, user, log)
+ import_ssh_ids(ids, user)
return
# import for cloudinit created users
@@ -90,14 +89,14 @@ def handle(
try:
import_ids = user_cfg["ssh_import_id"]
except Exception:
- log.debug("User %s is not configured for ssh_import_id", user)
+ LOG.debug("User %s is not configured for ssh_import_id", user)
continue
try:
import_ids = util.uniq_merge(import_ids)
import_ids = [str(i) for i in import_ids]
except Exception:
- log.debug(
+ LOG.debug(
"User %s is not correctly configured for ssh_import_id", user
)
continue
@@ -106,10 +105,10 @@ def handle(
continue
try:
- import_ssh_ids(import_ids, user, log)
+ import_ssh_ids(import_ids, user)
except Exception as exc:
util.logexc(
- log, "ssh-import-id failed for: %s %s", user, import_ids
+ LOG, "ssh-import-id failed for: %s %s", user, import_ids
)
elist.append(exc)
@@ -117,10 +116,10 @@ def handle(
raise elist[0]
-def import_ssh_ids(ids, user, log):
+def import_ssh_ids(ids, user):
if not (user and ids):
- log.debug("empty user(%s) or ids(%s). not importing", user, ids)
+ LOG.debug("empty user(%s) or ids(%s). not importing", user, ids)
return
try:
@@ -160,12 +159,12 @@ def import_ssh_ids(ids, user, log):
user,
SSH_IMPORT_ID_BINARY,
] + ids
- log.debug("Importing SSH ids for user %s.", user)
+ LOG.debug("Importing SSH ids for user %s.", user)
try:
subp.subp(cmd, capture=False)
except subp.ProcessExecutionError as exc:
- util.logexc(log, "Failed to run command to import %s SSH ids", user)
+ util.logexc(LOG, "Failed to run command to import %s SSH ids", user)
raise exc
diff --git a/cloudinit/config/cc_timezone.py b/cloudinit/config/cc_timezone.py
index 7436adf3..726bad73 100644
--- a/cloudinit/config/cc_timezone.py
+++ b/cloudinit/config/cc_timezone.py
@@ -7,7 +7,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Timezone: Set the system timezone"""
-from logging import Logger
+import logging
from cloudinit import util
from cloudinit.cloud import Cloud
@@ -34,18 +34,17 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if len(args) != 0:
timezone = args[0]
else:
timezone = util.get_cfg_option_str(cfg, "timezone", False)
if not timezone:
- log.debug("Skipping module named %s, no 'timezone' specified", name)
+ LOG.debug("Skipping module named %s, no 'timezone' specified", name)
return
# Let the distro handle settings its timezone
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index 9dd8f3a2..b85db6a7 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -4,7 +4,6 @@
import json
import re
-from logging import Logger
from textwrap import dedent
from typing import Any, List
from urllib.parse import urlparse
@@ -398,7 +397,11 @@ def _should_auto_attach(ua_section: dict) -> bool:
# pylint: enable=import-error
try:
- result = should_auto_attach()
+ result = util.log_time(
+ logfunc=LOG.debug,
+ msg="Checking if the instance can be attached to Ubuntu Pro",
+ func=should_auto_attach,
+ )
except UserFacingError as ex:
LOG.debug("Error during `should_auto_attach`: %s", ex)
LOG.warning(ERROR_MSG_SHOULD_AUTO_ATTACH)
@@ -440,7 +443,12 @@ def _auto_attach(ua_section: dict):
enable_beta=enable_beta,
)
try:
- full_auto_attach(options=options)
+ util.log_time(
+ logfunc=LOG.debug,
+ msg="Attaching to Ubuntu Pro",
+ func=full_auto_attach,
+ kwargs={"options": options},
+ )
except AlreadyAttachedError:
if enable_beta is not None or enable is not None:
# Only warn if the user defined some service to enable/disable.
@@ -455,9 +463,7 @@ def _auto_attach(ua_section: dict):
raise RuntimeError(msg) from ex
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
ua_section = None
if "ubuntu-advantage" in cfg:
LOG.warning(
@@ -495,6 +501,9 @@ def handle(
# ua-auto-attach.service had noop-ed as ua_section is not empty
validate_schema_features(ua_section)
+ LOG.debug(
+ "To discover more log info, please check /var/log/ubuntu-advantage.log"
+ )
if _should_auto_attach(ua_section):
_auto_attach(ua_section)
diff --git a/cloudinit/config/cc_ubuntu_autoinstall.py b/cloudinit/config/cc_ubuntu_autoinstall.py
index 3870cf59..0eab281b 100644
--- a/cloudinit/config/cc_ubuntu_autoinstall.py
+++ b/cloudinit/config/cc_ubuntu_autoinstall.py
@@ -3,7 +3,6 @@
"""Autoinstall: Support ubuntu live-server autoinstall syntax."""
import re
-from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
@@ -75,9 +74,7 @@ __doc__ = get_meta_doc(meta)
LIVE_INSTALLER_SNAPS = ("subiquity", "ubuntu-desktop-installer")
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if "autoinstall" not in cfg:
LOG.debug(
diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
index 59347e25..fb340e79 100644
--- a/cloudinit/config/cc_ubuntu_drivers.py
+++ b/cloudinit/config/cc_ubuntu_drivers.py
@@ -16,7 +16,6 @@ except ImportError:
debconf = None
HAS_DEBCONF = False
-from logging import Logger
from cloudinit import log as logging
from cloudinit import subp, temp_utils, type_utils, util
@@ -140,14 +139,12 @@ def install_drivers(cfg, pkg_install_func, distro: Distro):
raise
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if "drivers" not in cfg:
- log.debug("Skipping module named %s, no 'drivers' key in config", name)
+ LOG.debug("Skipping module named %s, no 'drivers' key in config", name)
return
if not HAS_DEBCONF:
- log.warning(
+ LOG.warning(
"Skipping module named %s, 'python3-debconf' is not installed",
name,
)
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index 7bb9dff0..c3e5b71e 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -8,7 +8,7 @@
"""Update Etc Hosts: Update the hosts file (usually ``/etc/hosts``)"""
-from logging import Logger
+import logging
from textwrap import dedent
from cloudinit import templater, util
@@ -95,24 +95,24 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
hosts_fn = cloud.distro.hosts_fn
if util.translate_bool(manage_hosts, addons=["template"]):
if manage_hosts == "template":
- log.warning(
- "DEPRECATED: please use manage_etc_hosts: true instead of"
- " 'template'"
+ util.deprecate(
+ deprecated="Value 'template' for key 'manage_etc_hosts'",
+ deprecated_version="22.2",
+ extra_message="Use 'true' instead.",
)
(hostname, fqdn, _) = util.get_hostname_fqdn(cfg, cloud)
if not hostname:
- log.warning(
+ LOG.warning(
"Option 'manage_etc_hosts' was set, but no hostname was found"
)
return
@@ -134,15 +134,15 @@ def handle(
elif manage_hosts == "localhost":
(hostname, fqdn, _) = util.get_hostname_fqdn(cfg, cloud)
if not hostname:
- log.warning(
+ LOG.warning(
"Option 'manage_etc_hosts' was set, but no hostname was found"
)
return
- log.debug("Managing localhost in %s", hosts_fn)
+ LOG.debug("Managing localhost in %s", hosts_fn)
cloud.distro.update_etc_hosts(hostname, fqdn)
else:
- log.debug(
+ LOG.debug(
"Configuration option 'manage_etc_hosts' is not set,"
" not managing %s in module %s",
hosts_fn,
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index 8a99297f..8de51147 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -8,8 +8,8 @@
"""Update Hostname: Update hostname and fqdn"""
+import logging
import os
-from logging import Logger
from textwrap import dedent
from cloudinit import util
@@ -80,13 +80,12 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug(
+ LOG.debug(
"Configuration option 'preserve_hostname' is set,"
" not updating the hostname in module %s",
name,
@@ -103,16 +102,16 @@ def handle(
(hostname, fqdn, is_default) = util.get_hostname_fqdn(cfg, cloud)
if is_default and hostname == "localhost":
# https://github.com/systemd/systemd/commit/d39079fcaa05e23540d2b1f0270fa31c22a7e9f1
- log.debug("Hostname is localhost. Let other services handle this.")
+ LOG.debug("Hostname is localhost. Let other services handle this.")
return
try:
prev_fn = os.path.join(cloud.get_cpath("data"), "previous-hostname")
- log.debug("Updating hostname to %s (%s)", fqdn, hostname)
+ LOG.debug("Updating hostname to %s (%s)", fqdn, hostname)
cloud.distro.update_hostname(hostname, fqdn, prev_fn)
except Exception:
util.logexc(
- log, "Failed to update the hostname to %s (%s)", fqdn, hostname
+ LOG, "Failed to update the hostname to %s (%s)", fqdn, hostname
)
raise
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index c654270e..52f0b844 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -6,7 +6,6 @@
"Users and Groups: Configure users and groups"
-from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
@@ -161,9 +160,7 @@ NO_HOME = ("no_create_home", "system")
NEED_HOME = ("ssh_authorized_keys", "ssh_import_id", "ssh_redirect_user")
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
(users, groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
(default_user, _user_config) = ug_util.extract_default(users)
cloud_keys = cloud.get_public_ssh_keys() or []
diff --git a/cloudinit/config/cc_wireguard.py b/cloudinit/config/cc_wireguard.py
index 732440f0..1e8ad890 100644
--- a/cloudinit/config/cc_wireguard.py
+++ b/cloudinit/config/cc_wireguard.py
@@ -4,7 +4,6 @@
"""Wireguard"""
import re
-from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
@@ -258,9 +257,7 @@ def load_wireguard_kernel_module():
raise
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
wg_section = None
if "wireguard" in cfg:
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index a517d044..f928dcc8 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -8,7 +8,6 @@
import base64
import os
-from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
@@ -119,9 +118,7 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
file_list = cfg.get("write_files", [])
filtered_files = [
f
@@ -129,7 +126,7 @@ def handle(
if not util.get_cfg_option_bool(f, "defer", DEFAULT_DEFER)
]
if not filtered_files:
- log.debug(
+ LOG.debug(
"Skipping module named %s,"
" no/empty 'write_files' key in configuration",
name,
diff --git a/cloudinit/config/cc_write_files_deferred.py b/cloudinit/config/cc_write_files_deferred.py
index a196ffb9..3c90f5c3 100644
--- a/cloudinit/config/cc_write_files_deferred.py
+++ b/cloudinit/config/cc_write_files_deferred.py
@@ -4,7 +4,7 @@
"""Write Files Deferred: Defer writing certain files"""
-from logging import Logger
+import logging
from cloudinit import util
from cloudinit.cloud import Cloud
@@ -36,11 +36,10 @@ meta: MetaSchema = {
# This module is undocumented in our schema docs
__doc__ = ""
+LOG = logging.getLogger(__name__)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
file_list = cfg.get("write_files", [])
filtered_files = [
f
@@ -48,7 +47,7 @@ def handle(
if util.get_cfg_option_bool(f, "defer", DEFAULT_DEFER)
]
if not filtered_files:
- log.debug(
+ LOG.debug(
"Skipping module named %s,"
" no deferred file defined in configuration",
name,
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 3087b22c..097493ba 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -7,9 +7,9 @@
"Yum Add Repo: Add yum repository configuration to the system"
import io
+import logging
import os
from configparser import ConfigParser
-from logging import Logger
from textwrap import dedent
from cloudinit import util
@@ -123,6 +123,7 @@ meta: MetaSchema = {
}
__doc__ = get_meta_doc(meta)
+LOG = logging.getLogger(__name__)
def _canonicalize_id(repo_id: str) -> str:
@@ -169,12 +170,10 @@ def _format_repository_config(repo_id, repo_config):
return "".join(lines)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
repos = cfg.get("yum_repos")
if not repos:
- log.debug(
+ LOG.debug(
"Skipping module named %s, no 'yum_repos' configuration found",
name,
)
@@ -188,14 +187,14 @@ def handle(
canon_repo_id = _canonicalize_id(repo_id)
repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
if os.path.exists(repo_fn_pth):
- log.info(
+ LOG.info(
"Skipping repo %s, file %s already exists!",
repo_id,
repo_fn_pth,
)
continue
elif canon_repo_id in repo_locations:
- log.info(
+ LOG.info(
"Skipping repo %s, file %s already pending!",
repo_id,
repo_fn_pth,
@@ -213,7 +212,7 @@ def handle(
missing_required = 0
for req_field in ["baseurl"]:
if req_field not in repo_config:
- log.warning(
+ LOG.warning(
"Repository %s does not contain a %s"
" configuration 'required' entry",
repo_id,
@@ -224,7 +223,7 @@ def handle(
repo_configs[canon_repo_id] = repo_config
repo_locations[canon_repo_id] = repo_fn_pth
else:
- log.warning(
+ LOG.warning(
"Repository %s is missing %s required fields, skipping!",
repo_id,
missing_required,
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
index 958e4f94..9bbc4c33 100644
--- a/cloudinit/config/cc_zypper_add_repo.py
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -6,7 +6,6 @@
"""zypper_add_repo: Add zypper repositories to the system"""
import os
-from logging import Logger
from textwrap import dedent
import configobj
@@ -190,9 +189,7 @@ def _write_zypp_config(zypper_config):
util.write_file(zypp_config, new_config)
-def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
-) -> None:
+def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
zypper_section = cfg.get("zypper")
if not zypper_section:
LOG.debug(
diff --git a/cloudinit/config/modules.py b/cloudinit/config/modules.py
index 6716fc32..def6b795 100644
--- a/cloudinit/config/modules.py
+++ b/cloudinit/config/modules.py
@@ -7,6 +7,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import copy
+from inspect import signature
from types import ModuleType
from typing import Dict, List, NamedTuple, Optional
@@ -221,17 +222,12 @@ class Modules:
# and which ones failed + the exception of why it failed
failures = []
which_ran = []
- for (mod, name, freq, args) in mostly_mods:
+ for mod, name, freq, args in mostly_mods:
try:
LOG.debug(
"Running module %s (%s) with frequency %s", name, mod, freq
)
- # Use the configs logger and not our own
- # TODO(harlowja): possibly check the module
- # for having a LOG attr and just give it back
- # its own logger?
- func_args = [name, self.cfg, cc, LOG, args]
# Mark it as having started running
which_ran.append(name)
# This name will affect the semaphore name created
@@ -241,8 +237,22 @@ class Modules:
myrep = ReportEventStack(
name=run_name, description=desc, parent=self.reporter
)
+ func_args = {
+ "name": name,
+ "cfg": self.cfg,
+ "cloud": cc,
+ "args": args,
+ }
with myrep:
+ func_signature = signature(mod.handle)
+ func_params = func_signature.parameters
+ if len(func_params) == 5:
+ util.deprecate(
+ deprecated="Config modules with a `log` parameter",
+ deprecated_version="23.2",
+ )
+ func_args.update({"log": LOG})
ran, _r = cc.run(
run_name, mod.handle, func_args, freq=freq
)
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index b0b5fccf..0669defc 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -476,7 +476,9 @@ def validate_cloudconfig_schema(
prefix="Deprecated cloud-config provided:\n",
separator="\n",
)
- LOG.warning(message)
+ # This warning doesn't fit the standardized util.deprecated() utility
+ # format, but it is a deprecation log, so log it directly.
+ LOG.deprecated(message) # type: ignore
if strict and (errors or deprecations):
raise SchemaValidationError(errors, deprecations)
if errors:
@@ -1137,19 +1139,6 @@ def get_schema() -> dict:
return full_schema
-def get_meta() -> dict:
- """Return metadata coalesced from all cc_* cloud-config module."""
- full_meta = dict()
- for (_, mod_name) in get_modules().items():
- mod_locs, _ = importer.find_module(
- mod_name, ["cloudinit.config"], ["meta"]
- )
- if mod_locs:
- mod = importer.import_module(mod_locs[0])
- full_meta[mod.meta["id"]] = mod.meta
- return full_meta
-
-
def get_parser(parser=None):
"""Return a parser for supported cmdline arguments."""
if not parser:
diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json
index 10636e6d..2a2d8631 100644
--- a/cloudinit/config/schemas/schema-cloud-config-v1.json
+++ b/cloudinit/config/schemas/schema-cloud-config-v1.json
@@ -1343,7 +1343,7 @@
"description": "Device to use as target for grub installation. If unspecified, ``grub-probe`` of ``/boot`` will be used to find the device"
},
"grub-pc/install_devices_empty": {
- "description": "Sets values for ``grub-pc/install_devices_empty``. If unspecified, will be set to ``true`` if ``grub-pc/install_devices`` is empty, otherwise ``false``.",
+ "description": "Sets values for ``grub-pc/install_devices_empty``. If unspecified, will be set to ``true`` if ``grub-pc/install_devices`` is empty, otherwise ``false``",
"oneOf": [
{
"type": "boolean"
@@ -1355,11 +1355,16 @@
"changed_description": "Use a boolean value instead."
}
]
+ },
+ "grub-efi/install_devices": {
+ "type": "string",
+ "description": "Partition to use as target for grub installation. If unspecified, ``grub-probe`` of ``/boot/efi`` will be used to find the partition"
}
}
},
"grub-dpkg": {
"type": "object",
+ "description": "An alias for ``grub_dpkg``",
"deprecated": true,
"deprecated_version": "22.2",
"deprecated_description": "Use ``grub_dpkg`` instead."
diff --git a/cloudinit/dhclient_hook.py b/cloudinit/dhclient_hook.py
deleted file mode 100644
index 46b2e8d9..00000000
--- a/cloudinit/dhclient_hook.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Run the dhclient hook to record network info."""
-
-import argparse
-import os
-
-from cloudinit import atomic_helper
-from cloudinit import log as logging
-from cloudinit import stages
-
-LOG = logging.getLogger(__name__)
-
-NAME = "dhclient-hook"
-UP = "up"
-DOWN = "down"
-EVENTS = (UP, DOWN)
-
-
-def _get_hooks_dir():
- i = stages.Init()
- return os.path.join(i.paths.get_runpath(), "dhclient.hooks")
-
-
-def _filter_env_vals(info):
- """Given info (os.environ), return a dictionary with
- lower case keys for each entry starting with DHCP4_ or new_."""
- new_info = {}
- for k, v in info.items():
- if k.startswith("DHCP4_") or k.startswith("new_"):
- key = (k.replace("DHCP4_", "").replace("new_", "")).lower()
- new_info[key] = v
- return new_info
-
-
-def run_hook(interface, event, data_d=None, env=None):
- if event not in EVENTS:
- raise ValueError(
- "Unexpected event '%s'. Expected one of: %s" % (event, EVENTS)
- )
- if data_d is None:
- data_d = _get_hooks_dir()
- if env is None:
- env = os.environ
- hook_file = os.path.join(data_d, interface + ".json")
-
- if event == UP:
- if not os.path.exists(data_d):
- os.makedirs(data_d)
- atomic_helper.write_json(hook_file, _filter_env_vals(env))
- LOG.debug("Wrote dhclient options in %s", hook_file)
- elif event == DOWN:
- if os.path.exists(hook_file):
- os.remove(hook_file)
- LOG.debug("Removed dhclient options file %s", hook_file)
-
-
-def get_parser(parser=None):
- if parser is None:
- parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
- parser.add_argument(
- "event", help="event taken on the interface", choices=EVENTS
- )
- parser.add_argument(
- "interface", help="the network interface being acted upon"
- )
- # cloud-init main uses 'action'
- parser.set_defaults(action=(NAME, handle_args))
- return parser
-
-
-def handle_args(name, args, data_d=None):
- """Handle the Namespace args.
- Takes 'name' as passed by cloud-init main. not used here."""
- return run_hook(interface=args.interface, event=args.event, data_d=data_d)
-
-
-if __name__ == "__main__":
- import sys
-
- parser = get_parser()
- args = parser.parse_args(args=sys.argv[1:])
- return_value = handle_args(
- NAME, args, data_d=os.environ.get("_CI_DHCP_HOOK_DATA_D")
- )
- if return_value:
- sys.exit(return_value)
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 940b689e..b82852e1 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -549,12 +549,12 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
groups = groups.split(",")
if isinstance(groups, dict):
- LOG.warning(
- "DEPRECATED: The user %s has a 'groups' config value of"
- " type dict which is deprecated and will be removed in a"
- " future version of cloud-init. Use a comma-delimited"
- " string or array instead: group1,group2.",
- name,
+ util.deprecate(
+ deprecated=f"The user {name} has a 'groups' config value "
+ "of type dict",
+ deprecated_version="22.3",
+ extra_message="Use a comma-delimited string or "
+ "array instead: group1,group2.",
)
# remove any white spaces in group names, most likely
@@ -682,11 +682,11 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
if kwargs["sudo"]:
self.write_sudo_rules(name, kwargs["sudo"])
elif kwargs["sudo"] is False:
- LOG.warning(
- "DEPRECATED: The user %s has a 'sudo' config value of"
- " 'false' which will be dropped after April 2027."
- " Use 'null' instead.",
- name,
+ util.deprecate(
+ deprecated=f"The value of 'false' in user {name}'s "
+ "'sudo' config",
+ deprecated_version="22.3",
+ extra_message="Use 'null' instead.",
)
# Import SSH keys
@@ -992,37 +992,6 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
**kwargs,
)
- @property
- def is_virtual(self) -> Optional[bool]:
- """Detect if running on a virtual machine or bare metal.
-
- If the detection fails, it returns None.
- """
- if not uses_systemd():
- # For non systemd systems the method should be
- # implemented in the distro class.
- LOG.warning("is_virtual should be implemented on distro class")
- return None
-
- try:
- detect_virt_path = subp.which("systemd-detect-virt")
- if detect_virt_path:
- out, _ = subp.subp(
- [detect_virt_path], capture=True, rcs=[0, 1]
- )
-
- return not out.strip() == "none"
- else:
- err_msg = "detection binary not found"
- except subp.ProcessExecutionError as e:
- err_msg = str(e)
-
- LOG.warning(
- "Failed to detect virtualization with systemd-detect-virt: %s",
- err_msg,
- )
- return None
-
def _apply_hostname_transformations_to_url(url: str, transformations: list):
"""
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 706d0743..4268abe6 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -6,9 +6,7 @@
import os
import re
-from functools import lru_cache
from io import StringIO
-from typing import Optional
import cloudinit.distros.bsd
from cloudinit import log as logging
@@ -194,40 +192,5 @@ class Distro(cloudinit.distros.bsd.BSD):
freq=PER_INSTANCE,
)
- @lru_cache()
- def is_container(self) -> bool:
- """return whether we're running in a container.
- Cached, because it's unlikely to change."""
- jailed, _ = subp.subp(["sysctl", "-n", "security.jail.jailed"])
- if jailed.strip() == "0":
- return False
- return True
-
- @lru_cache()
- def virtual(self) -> str:
- """return the kind of virtualisation system we're running under.
- Cached, because it's unlikely to change."""
- if self.is_container():
- return "jail"
- # map FreeBSD's kern.vm_guest to systemd-detect-virt, just like we do
- # in ds-identify
- VM_GUEST_TO_SYSTEMD = {
- "hv": "microsoft",
- "vbox": "oracle",
- "generic": "vm-other",
- }
- vm, _ = subp.subp(["sysctl", "-n", "kern.vm_guest"])
- vm = vm.strip()
- if vm in VM_GUEST_TO_SYSTEMD:
- return VM_GUEST_TO_SYSTEMD[vm]
- return vm
-
- @property
- def is_virtual(self) -> Optional[bool]:
- """Detect if running on a virtual machine or bare metal.
- This can fail on some platforms, so the signature is Optional[bool]
- """
- if self.virtual() == "none":
- return False
- return True
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/netbsd.py b/cloudinit/distros/netbsd.py
index b3232feb..b9811e6a 100644
--- a/cloudinit/distros/netbsd.py
+++ b/cloudinit/distros/netbsd.py
@@ -100,13 +100,6 @@ class NetBSD(cloudinit.distros.bsd.BSD):
raise
self.unlock_passwd(user)
- def force_passwd_change(self, user):
- try:
- subp.subp(["usermod", "-F", user])
- except Exception:
- util.logexc(LOG, "Failed to set pw expiration for %s", user)
- raise
-
def lock_passwd(self, name):
try:
subp.subp(["usermod", "-C", "yes", name])
diff --git a/cloudinit/distros/parsers/ifconfig.py b/cloudinit/distros/parsers/ifconfig.py
index 3e57e41a..0897beba 100644
--- a/cloudinit/distros/parsers/ifconfig.py
+++ b/cloudinit/distros/parsers/ifconfig.py
@@ -71,10 +71,6 @@ class Ifstate:
def is_vlan(self) -> bool:
return ("vlan" in self.groups) or (self.vlan != {})
- @property
- def is_wlan(self) -> bool:
- return "wlan" in self.groups
-
class Ifconfig:
"""
@@ -201,9 +197,6 @@ class Ifconfig:
self._ifs_by_mac = dict(ifs_by_mac)
return {**self._ifs_by_name, **self._ifs_by_mac}
- def ifs_by_name(self):
- return self._ifs_by_name
-
def ifs_by_mac(self):
return self._ifs_by_mac
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index df7dc3d6..7fb7e56d 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -18,13 +18,6 @@ from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
-def _make_sysconfig_bool(val):
- if val:
- return "yes"
- else:
- return "no"
-
-
class Distro(distros.Distro):
# See: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Networking_Guide/sec-Network_Configuration_Using_sysconfig_Files.html # noqa
clock_conf_fn = "/etc/sysconfig/clock"
diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py
index e0a4d068..2697527d 100644
--- a/cloudinit/distros/ug_util.py
+++ b/cloudinit/distros/ug_util.py
@@ -174,9 +174,10 @@ def normalize_users_groups(cfg, distro):
# Translate it into a format that will be more useful going forward
if isinstance(old_user, str):
old_user = {"name": old_user}
- LOG.warning(
- "DEPRECATED: 'user' of type string is deprecated and will"
- " be removed in a future release. Use 'users' list instead."
+ util.deprecate(
+ deprecated="'user' of type string",
+ deprecated_version="22.2",
+ extra_message="Use 'users' list instead.",
)
elif not isinstance(old_user, dict):
LOG.warning(
@@ -206,10 +207,10 @@ def normalize_users_groups(cfg, distro):
base_users = cfg.get("users", [])
if isinstance(base_users, (dict, str)):
- LOG.warning(
- "DEPRECATED: 'users' of type %s is deprecated and will be removed"
- " in a future release. Use 'users' as a list.",
- type(base_users),
+ util.deprecate(
+ deprecated=f"'users' of type {type(base_users)}",
+ deprecated_version="22.2",
+ extra_message="Use 'users' as a list.",
)
elif not isinstance(base_users, (list)):
LOG.warning(
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 3e90f07d..78ddb794 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -43,9 +43,6 @@ class DummySemaphores:
def clear(self, _name, _freq):
return True
- def clear_all(self):
- pass
-
class FileLock:
def __init__(self, fn):
@@ -83,14 +80,6 @@ class FileSemaphores:
return False
return True
- def clear_all(self):
- try:
- util.del_dir(self.sem_path)
- except (IOError, OSError):
- util.logexc(
- LOG, "Failed deleting semaphore directory %s", self.sem_path
- )
-
def _acquire(self, name, freq):
# Check again if its been already gotten
if self.has_run(name, freq):
diff --git a/cloudinit/log.py b/cloudinit/log.py
index f40201bb..d9912a50 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -63,6 +63,16 @@ def flushLoggers(root):
flushLoggers(root.parent)
+def defineDeprecationLogger(lvl=35):
+ logging.addLevelName(lvl, "DEPRECATED")
+
+ def deprecated(self, message, *args, **kwargs):
+ if self.isEnabledFor(lvl):
+ self._log(lvl, message, args, **kwargs)
+
+ logging.Logger.deprecated = deprecated
+
+
def setupLogging(cfg=None):
# See if the config provides any logging conf...
if not cfg:
@@ -83,6 +93,7 @@ def setupLogging(cfg=None):
log_cfgs.append("\n".join(cfg_str))
else:
log_cfgs.append(str(a_cfg))
+ defineDeprecationLogger()
# See if any of them actually load...
am_tried = 0
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 50e445ec..46bce184 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -167,6 +167,10 @@ def master_is_openvswitch(devname):
return os.path.exists(ovs_path)
+def is_ib_interface(devname):
+ return read_sys_net_safe(devname, "type") == "32"
+
+
@functools.lru_cache(maxsize=None)
def openvswitch_is_installed() -> bool:
"""Return a bool indicating if Open vSwitch is installed in the system."""
@@ -882,7 +886,7 @@ def _rename_interfaces(
)
if len(errors):
- raise Exception("\n".join(errors))
+ raise RuntimeError("\n".join(errors))
def get_interface_mac(ifname):
@@ -1040,7 +1044,7 @@ def get_interfaces_by_mac_on_linux(blacklist_drivers=None) -> dict:
# cloud-init happens to enumerate network interfaces before drivers
# have fully initialized the leader/subordinate relationships for
# those devices or switches.
- if driver == "mscc_felix" or driver == "fsl_enetc":
+ if driver in ("fsl_enetc", "mscc_felix", "qmi_wwan"):
LOG.debug(
"Ignoring duplicate macs from '%s' and '%s' due to "
"driver '%s'.",
diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py
index 7d11a02c..e69da40d 100644
--- a/cloudinit/net/activators.py
+++ b/cloudinit/net/activators.py
@@ -72,24 +72,6 @@ class NetworkActivator(ABC):
[i["name"] for i in network_state.iter_interfaces()]
)
- @classmethod
- def bring_down_interfaces(cls, device_names: Iterable[str]) -> bool:
- """Bring down specified list of interfaces.
-
- Return True is successful, otherwise return False
- """
- return all(cls.bring_down_interface(device) for device in device_names)
-
- @classmethod
- def bring_down_all_interfaces(cls, network_state: NetworkState) -> bool:
- """Bring down all interfaces.
-
- Return True is successful, otherwise return False
- """
- return cls.bring_down_interfaces(
- [i["name"] for i in network_state.iter_interfaces()]
- )
-
class IfUpDownActivator(NetworkActivator):
# Note that we're not overriding bring_up_interfaces to pass something
@@ -205,26 +187,6 @@ class NetplanActivator(NetworkActivator):
)
return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
- @staticmethod
- def bring_down_interfaces(device_names: Iterable[str]) -> bool:
- """Apply netplan config.
-
- Return True is successful, otherwise return False
- """
- LOG.debug(
- "Calling 'netplan apply' rather than "
- "altering individual interfaces"
- )
- return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
-
- @staticmethod
- def bring_down_all_interfaces(network_state: NetworkState) -> bool:
- """Apply netplan config.
-
- Return True is successful, otherwise return False
- """
- return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
-
class NetworkdActivator(NetworkActivator):
@staticmethod
diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py
index b23279e5..8892b3ba 100644
--- a/cloudinit/net/bsd.py
+++ b/cloudinit/net/bsd.py
@@ -222,9 +222,6 @@ class BSDRenderer(renderer.Renderer):
def write_config(self, target=None):
raise NotImplementedError()
- def set_gateway(self, gateway):
- raise NotImplementedError()
-
def rename_interface(self, cur_name, device_name):
raise NotImplementedError()
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index a9a1c980..a8949ebc 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -14,8 +14,13 @@ from io import StringIO
import configobj
-from cloudinit import subp, util
-from cloudinit.net import find_fallback_nic, get_devicelist
+from cloudinit import subp, temp_utils, util
+from cloudinit.net import (
+ find_fallback_nic,
+ get_devicelist,
+ get_interface_mac,
+ is_ib_interface,
+)
LOG = logging.getLogger(__name__)
@@ -42,7 +47,7 @@ class NoDHCPLeaseMissingDhclientError(NoDHCPLeaseError):
"""Raised when unable to find dhclient."""
-def maybe_perform_dhcp_discovery(nic=None, dhcp_log_func=None, tmp_dir=None):
+def maybe_perform_dhcp_discovery(nic=None, dhcp_log_func=None):
"""Perform dhcp discovery if nic valid and dhclient command exists.
If the nic is invalid or undiscoverable or dhclient command is not found,
@@ -51,7 +56,6 @@ def maybe_perform_dhcp_discovery(nic=None, dhcp_log_func=None, tmp_dir=None):
@param nic: Name of the network interface we want to run dhclient on.
@param dhcp_log_func: A callable accepting the dhclient output and error
streams.
- @param tmp_dir: Tmp dir with exec permissions.
@return: A list of dicts representing dhcp options for each lease obtained
from the dhclient discovery if run, otherwise an empty list is
returned.
@@ -137,6 +141,9 @@ def dhcp_discovery(dhclient_cmd_path, interface, dhcp_log_func=None):
# link up before attempting discovery. Since we are using -sf /bin/true,
# we need to do that "link up" ourselves first.
subp.subp(["ip", "link", "set", "dev", interface, "up"], capture=True)
+ # For INFINIBAND port the dhlient must be sent with dhcp-client-identifier.
+ # So here we are checking if the interface is INFINIBAND or not.
+ # If yes, we are generating the the client-id to be used with the dhclient
cmd = [
dhclient_cmd_path,
"-1",
@@ -149,6 +156,18 @@ def dhcp_discovery(dhclient_cmd_path, interface, dhcp_log_func=None):
"-sf",
"/bin/true",
]
+ if is_ib_interface(interface):
+ dhcp_client_identifier = "20:%s" % get_interface_mac(interface)[36:]
+ interface_dhclient_content = (
+ 'interface "%s" '
+ "{send dhcp-client-identifier %s;}"
+ % (interface, dhcp_client_identifier)
+ )
+ tmp_dir = temp_utils.get_tmp_ancestor(needs_exe=True)
+ file_name = os.path.join(tmp_dir, interface + "-dhclient.conf")
+ util.write_file(file_name, interface_dhclient_content)
+ cmd.append("-cf")
+ cmd.append(file_name)
out, err = subp.subp(cmd, capture=True)
# Wait for pid file and lease file to appear, and for the process
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index 53bd35ca..92aa83de 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -308,18 +308,6 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
ifaces[iface]["auto"] = False
-def parse_deb_config(path):
- """Parses a debian network configuration file."""
- ifaces = {}
- with open(path, "r") as fp:
- contents = fp.read().strip()
- abs_path = os.path.abspath(path)
- _parse_deb_config_data(
- ifaces, contents, os.path.dirname(abs_path), abs_path
- )
- return ifaces
-
-
def convert_eni_data(eni_data):
# return a network config representation of what is in eni_data
ifaces = {}
@@ -329,7 +317,7 @@ def convert_eni_data(eni_data):
def _ifaces_to_net_config_data(ifaces):
"""Return network config that represents the ifaces data provided.
- ifaces = parse_deb_config("/etc/network/interfaces")
+ ifaces = _parse_deb_config_data(...)
config = ifaces_to_net_config_data(ifaces)
state = parse_net_config_data(config)."""
devs = {}
@@ -576,7 +564,9 @@ class Renderer(renderer.Renderer):
netrules = subp.target_path(target, self.netrules_path)
util.ensure_dir(os.path.dirname(netrules))
util.write_file(
- netrules, self._render_persistent_net(network_state)
+ netrules,
+ content=self._render_persistent_net(network_state),
+ preserve_mode=True,
)
diff --git a/cloudinit/net/ephemeral.py b/cloudinit/net/ephemeral.py
index 1dfde6e0..cade2e5f 100644
--- a/cloudinit/net/ephemeral.py
+++ b/cloudinit/net/ephemeral.py
@@ -107,22 +107,6 @@ class EphemeralIPv4Network:
for cmd in self.cleanup_cmds:
subp.subp(cmd, capture=True)
- def _delete_address(self, address, prefix):
- """Perform the ip command to remove the specified address."""
- subp.subp(
- [
- "ip",
- "-family",
- "inet",
- "addr",
- "del",
- "%s/%s" % (address, prefix),
- "dev",
- self.interface,
- ],
- capture=True,
- )
-
def _bringup_device(self):
"""Perform the ip comands to fully setup the device."""
cidr = "{0}/{1}".format(self.ip, self.prefix)
@@ -315,14 +299,12 @@ class EphemeralDHCPv4:
iface=None,
connectivity_url_data: Optional[Dict[str, Any]] = None,
dhcp_log_func=None,
- tmp_dir=None,
):
self.iface = iface
self._ephipv4 = None
self.lease = None
self.dhcp_log_func = dhcp_log_func
self.connectivity_url_data = connectivity_url_data
- self.tmp_dir = tmp_dir
def __enter__(self):
"""Setup sandboxed dhcp context, unless connectivity_url can already be
@@ -360,9 +342,7 @@ class EphemeralDHCPv4:
"""
if self.lease:
return self.lease
- leases = maybe_perform_dhcp_discovery(
- self.iface, self.dhcp_log_func, self.tmp_dir
- )
+ leases = maybe_perform_dhcp_discovery(self.iface, self.dhcp_log_func)
if not leases:
raise NoDHCPLeaseError()
self.lease = leases[-1]
@@ -426,23 +406,19 @@ class EphemeralIPNetwork:
interface,
ipv6: bool = False,
ipv4: bool = True,
- tmp_dir=None,
):
self.interface = interface
self.ipv4 = ipv4
self.ipv6 = ipv6
self.stack = contextlib.ExitStack()
self.state_msg: str = ""
- self.tmp_dir = tmp_dir
def __enter__(self):
# ipv6 dualstack might succeed when dhcp4 fails
# therefore catch exception unless only v4 is used
try:
if self.ipv4:
- self.stack.enter_context(
- EphemeralDHCPv4(self.interface, tmp_dir=self.tmp_dir)
- )
+ self.stack.enter_context(EphemeralDHCPv4(self.interface))
if self.ipv6:
self.stack.enter_context(EphemeralIPv6Network(self.interface))
# v6 link local might be usable
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index ad586e1e..1c28e16e 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -500,23 +500,3 @@ def available(target=None):
if not subp.which(p, search=search, target=target):
return False
return True
-
-
-def network_state_to_netplan(network_state, header=None):
- # render the provided network state, return a string of equivalent eni
- netplan_path = "etc/network/50-cloud-init.yaml"
- renderer = Renderer(
- {
- "netplan_path": netplan_path,
- "netplan_header": header,
- }
- )
- if not header:
- header = ""
- if not header.endswith("\n"):
- header += "\n"
- contents = renderer._render_content(network_state)
- return header + contents
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index f88b1321..158a2951 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -86,20 +86,14 @@ NET_CONFIG_TO_V2: Dict[str, Dict[str, Any]] = {
def warn_deprecated_all_devices(dikt: dict) -> None:
"""Warn about deprecations of v2 properties for all devices"""
if "gateway4" in dikt or "gateway6" in dikt:
- LOG.warning(
- "DEPRECATED: The use of `gateway4` and `gateway6` is"
- " deprecated. For more info check out: "
- "https://cloudinit.readthedocs.io/en/latest/topics/network-config-format-v2.html" # noqa: E501
+ util.deprecate(
+ deprecated="The use of `gateway4` and `gateway6`",
+ deprecated_version="22.4",
+ extra_message="For more info check out: "
+ "https://cloudinit.readthedocs.io/en/latest/topics/network-config-format-v2.html", # noqa: E501
)
-def from_state_file(state_file):
- state = util.read_conf(state_file)
- nsi = NetworkStateInterpreter()
- nsi.load(state)
- return nsi
-
-
def diff_keys(expected, actual):
missing = set(expected)
for key in actual:
diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py
index 4fd8a9b8..0c9ece0c 100644
--- a/cloudinit/net/networkd.py
+++ b/cloudinit/net/networkd.py
@@ -80,15 +80,6 @@ class CfgParser:
return contents
- def dump_data(self, target_fn):
- if not target_fn:
- LOG.warning("Target file not given")
- return
-
- contents = self.get_final_conf()
- LOG.debug("Final content: %s", contents)
- util.write_file(target_fn, contents)
-
class Renderer(renderer.Renderer):
"""
@@ -355,7 +346,7 @@ class Renderer(renderer.Renderer):
f" and dhcp{version}-overrides.use-domains"
f" configured. Use one"
)
- raise Exception(exception)
+ raise RuntimeError(exception)
self.parse_dhcp_overrides(cfg, device, dhcp, version)
@@ -371,8 +362,3 @@ def available(target=None):
if not subp.which(p, search=search, target=target):
return False
return True
-
-
-def network_state_to_networkd(ns: NetworkState):
- renderer = Renderer({})
- return renderer._render_content(ns)
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
index 6bf4703c..72813e32 100644
--- a/cloudinit/net/renderer.py
+++ b/cloudinit/net/renderer.py
@@ -9,7 +9,7 @@ import abc
import io
from typing import Optional
-from cloudinit.net.network_state import NetworkState, parse_net_config_data
+from cloudinit.net.network_state import NetworkState
from cloudinit.net.udev import generate_udev_rule
@@ -17,10 +17,6 @@ def filter_by_type(match_type):
return lambda iface: match_type == iface["type"]
-def filter_by_name(match_name):
- return lambda iface: match_name == iface["name"]
-
-
def filter_by_attr(match_name):
return lambda iface: (match_name in iface and iface[match_name])
@@ -57,18 +53,3 @@ class Renderer:
target=None,
) -> None:
"""Render network state."""
-
- def render_network_config(
- self,
- network_config: dict,
- templates: Optional[dict] = None,
- target=None,
- ):
- return self.render_network_state(
- network_state=parse_net_config_data(network_config),
- templates=templates,
- target=target,
- )
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index d4daa78f..68fe41d8 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -1013,7 +1013,12 @@ class Renderer(renderer.Renderer):
if self.netrules_path:
netrules_content = self._render_persistent_net(network_state)
netrules_path = subp.target_path(target, self.netrules_path)
- util.write_file(netrules_path, netrules_content, file_mode)
+ util.write_file(
+ netrules_path,
+ content=netrules_content,
+ mode=file_mode,
+ preserve_mode=True,
+ )
sysconfig_path = subp.target_path(target, templates.get("control"))
# Distros configuring /etc/sysconfig/network as a file e.g. Centos
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 5eeeb967..a3bca86e 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -94,11 +94,13 @@ def _netdev_info_iproute_json(ipaddr_json):
return devs
+@util.deprecate_call(
+ deprecated_version="22.1",
+ extra_message="Required by old iproute2 versions that don't "
+ "support ip json output. Consider upgrading to a more recent version.",
+)
def _netdev_info_iproute(ipaddr_out):
"""
- DEPRECATED: Only used on distros that don't support ip json output
- Use _netdev_info_iproute_json() when possible.
-
@param ipaddr_out: Output string from 'ip addr show' command.
@returns: A dict of device info keyed by network device name containing
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index b7d3e5a3..807c02c7 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -306,20 +306,6 @@ DEF_EPHEMERAL_LABEL = "Temporary Storage"
DEF_PASSWD_REDACTION = "REDACTED"
-@azure_ds_telemetry_reporter
-def is_platform_viable(seed_dir: Optional[Path]) -> bool:
- """Check platform environment to report if this datasource may run."""
- chassis_tag = ChassisAssetTag.query_system()
- if chassis_tag is not None:
- return True
-
- # If no valid chassis tag, check for seeded ovf-env.xml.
- if seed_dir is None:
- return False
-
- return (seed_dir / "ovf-env.xml").exists()
-
-
class DataSourceAzure(sources.DataSource):
dsname = "Azure"
@@ -402,7 +388,6 @@ class DataSourceAzure(sources.DataSource):
self._ephemeral_dhcp_ctx = EphemeralDHCPv4(
iface=iface,
dhcp_log_func=dhcp_log_cb,
- tmp_dir=self.distro.get_tmp_exec_path(),
)
lease = None
@@ -580,6 +565,12 @@ class DataSourceAzure(sources.DataSource):
report_diagnostic_event(msg, logger_func=LOG.error)
raise sources.InvalidMetaDataException(msg)
+ # Networking is a hard requirement for source PPS, fail without it.
+ if not self._is_ephemeral_networking_up():
+ msg = "DHCP failed while in source PPS"
+ report_diagnostic_event(msg, logger_func=LOG.error)
+ raise sources.InvalidMetaDataException(msg)
+
if pps_type == PPSType.SAVABLE:
self._wait_for_all_nics_ready()
elif pps_type == PPSType.OS_DISK:
@@ -680,9 +671,9 @@ class DataSourceAzure(sources.DataSource):
return crawled_data
@azure_ds_telemetry_reporter
- def get_metadata_from_imds(self) -> Dict:
+ def get_metadata_from_imds(self, retries: int = 10) -> Dict:
try:
- return imds.fetch_metadata_with_api_fallback()
+ return imds.fetch_metadata_with_api_fallback(retries=retries)
except (UrlError, ValueError) as error:
report_diagnostic_event(
"Ignoring IMDS metadata due to: %s" % error,
@@ -696,14 +687,27 @@ class DataSourceAzure(sources.DataSource):
self._metadata_imds = sources.UNSET
@azure_ds_telemetry_reporter
+ def ds_detect(self):
+ """Check platform environment to report if this datasource may
+ run.
+ """
+ chassis_tag = ChassisAssetTag.query_system()
+ if chassis_tag is not None:
+ return True
+
+ # If no valid chassis tag, check for seeded ovf-env.xml.
+ if self.seed_dir is None:
+ return False
+
+ return Path(self.seed_dir, "ovf-env.xml").exists()
+
+ @azure_ds_telemetry_reporter
def _get_data(self):
"""Crawl and process datasource metadata caching metadata as attrs.
@return: True on success, False on error, invalid or disabled
datasource.
"""
- if not is_platform_viable(Path(self.seed_dir)):
- return False
try:
get_boot_telemetry()
except Exception as e:
@@ -979,11 +983,8 @@ class DataSourceAzure(sources.DataSource):
raise BrokenAzureDataSource("Shutdown failure for PPS disk.")
@azure_ds_telemetry_reporter
- def _check_if_nic_is_primary(self, ifname):
- """Check if a given interface is the primary nic or not. If it is the
- primary nic, then we also get the expected total nic count from IMDS.
- IMDS will process the request and send a response only for primary NIC.
- """
+ def _check_if_nic_is_primary(self, ifname: str) -> bool:
+ """Check if a given interface is the primary nic or not."""
# For now, only a VM's primary NIC can contact IMDS and WireServer. If
# DHCP fails for a NIC, we have no mechanism to determine if the NIC is
# primary or secondary. In this case, retry DHCP until successful.
@@ -992,18 +993,11 @@ class DataSourceAzure(sources.DataSource):
# Primary nic detection will be optimized in the future. The fact that
# primary nic is being attached first helps here. Otherwise each nic
# could add several seconds of delay.
- imds_md = self.get_metadata_from_imds()
+ imds_md = self.get_metadata_from_imds(retries=300)
if imds_md:
# Only primary NIC will get a response from IMDS.
LOG.info("%s is the primary nic", ifname)
-
- # Set the expected nic count based on the response received.
- expected_nic_count = len(imds_md["interface"])
- report_diagnostic_event(
- "Expected nic count: %d" % expected_nic_count,
- logger_func=LOG.info,
- )
- return True, expected_nic_count
+ return True
# If we are not the primary nic, then clean the dhcp context.
LOG.warning(
@@ -1012,7 +1006,7 @@ class DataSourceAzure(sources.DataSource):
ifname,
)
self._teardown_ephemeral_networking()
- return False, -1
+ return False
@azure_ds_telemetry_reporter
def _wait_for_hot_attached_primary_nic(self, nl_sock):
@@ -1055,9 +1049,7 @@ class DataSourceAzure(sources.DataSource):
# won't be in primary_nic_found = false state for long.
if not primary_nic_found:
LOG.info("Checking if %s is the primary nic", ifname)
- primary_nic_found, _ = self._check_if_nic_is_primary(
- ifname
- )
+ primary_nic_found = self._check_if_nic_is_primary(ifname)
# Exit criteria: check if we've discovered primary nic
if primary_nic_found:
@@ -1109,13 +1101,6 @@ class DataSourceAzure(sources.DataSource):
dhcp_attempts = 0
if report_ready:
- # Networking must be up for netlink to detect
- # media disconnect/connect. It may be down to due
- # initial DHCP failure, if so check for it and retry,
- # ensuring we flag it as required.
- if not self._is_ephemeral_networking_up():
- self._setup_ephemeral_networking(timeout_minutes=20)
-
try:
if (
self._ephemeral_dhcp_ctx is None
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index 270a3a18..1dcd7107 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -31,7 +31,8 @@ class DataSourceCloudSigma(sources.DataSource):
self.ssh_public_key = ""
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- def is_running_in_cloudsigma(self):
+ @staticmethod
+ def ds_detect():
"""
Uses dmi data to detect if this instance of cloud-init is running
in the CloudSigma's infrastructure.
@@ -51,8 +52,6 @@ class DataSourceCloudSigma(sources.DataSource):
as userdata.
"""
dsmode = None
- if not self.is_running_in_cloudsigma():
- return False
try:
server_context = self.cepko.all().result
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 52d3ad26..b6a110aa 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -102,7 +102,7 @@ class DataSourceDigitalOcean(sources.DataSource):
interfaces = self.metadata.get("interfaces")
LOG.debug(interfaces)
if not interfaces:
- raise Exception("Unable to get meta-data from server....")
+ raise RuntimeError("Unable to get meta-data from server....")
nameservers = self.metadata_full["dns"]["nameservers"]
self._network_config = do_helper.convert_network_configuration(
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 44665b26..e8fb0023 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -134,7 +134,6 @@ class DataSourceEc2(sources.DataSource):
self.fallback_interface,
ipv4=True,
ipv6=True,
- tmp_dir=self.distro.get_tmp_exec_path(),
) as netw:
state_msg = f" {netw.state_msg}" if netw.state_msg else ""
self._crawled_metadata = util.log_time(
diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py
index 23478e9e..cf42fdbb 100644
--- a/cloudinit/sources/DataSourceExoscale.py
+++ b/cloudinit/sources/DataSourceExoscale.py
@@ -100,9 +100,6 @@ class DataSourceExoscale(sources.DataSource):
Please refer to the datasource documentation for details on how the
metadata server and password server are crawled.
"""
- if not self._is_platform_viable():
- return False
-
data = util.log_time(
logfunc=LOG.debug,
msg="Crawl of metadata service",
@@ -142,7 +139,8 @@ class DataSourceExoscale(sources.DataSource):
def get_config_obj(self):
return self.extra_config
- def _is_platform_viable(self):
+ @staticmethod
+ def ds_detect():
return dmi.read_dmi_data("system-product-name").startswith(
EXOSCALE_DMI_NAME
)
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 29548a60..bb44cd1f 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -85,7 +85,6 @@ class DataSourceGCE(sources.DataSource):
if self.perform_dhcp_setup:
network_context = EphemeralDHCPv4(
self.fallback_interface,
- tmp_dir=self.distro.get_tmp_exec_path(),
)
with network_context:
ret = util.log_time(
diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py
index c70a23ce..14f14677 100644
--- a/cloudinit/sources/DataSourceHetzner.py
+++ b/cloudinit/sources/DataSourceHetzner.py
@@ -61,7 +61,6 @@ class DataSourceHetzner(sources.DataSource):
connectivity_url_data={
"url": BASE_URL_V1 + "/metadata/instance-id",
},
- tmp_dir=self.distro.get_tmp_exec_path(),
):
md = hc_helper.read_metadata(
self.metadata_address,
@@ -130,7 +129,7 @@ class DataSourceHetzner(sources.DataSource):
_net_config = self.metadata["network-config"]
if not _net_config:
- raise Exception("Unable to get meta-data from server....")
+ raise RuntimeError("Unable to get meta-data from server....")
self._network_config = _net_config
diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py
index ab440cc8..2643149b 100644
--- a/cloudinit/sources/DataSourceLXD.py
+++ b/cloudinit/sources/DataSourceLXD.py
@@ -181,16 +181,13 @@ class DataSourceLXD(sources.DataSource):
super()._unpickle(ci_pkl_version)
self.skip_hotplug_detect = True
- def _is_platform_viable(self) -> bool:
+ @staticmethod
+ def ds_detect() -> bool:
"""Check platform environment to report if this datasource may run."""
return is_platform_viable()
def _get_data(self) -> bool:
"""Crawl LXD socket API instance data and return True on success"""
- if not self._is_platform_viable():
- LOG.debug("Not an LXD datasource: No LXD socket found.")
- return False
-
self._crawled_metadata = util.log_time(
logfunc=LOG.debug,
msg="Crawl of metadata service",
diff --git a/cloudinit/sources/DataSourceNWCS.py b/cloudinit/sources/DataSourceNWCS.py
index e21383d2..aebbf689 100644
--- a/cloudinit/sources/DataSourceNWCS.py
+++ b/cloudinit/sources/DataSourceNWCS.py
@@ -43,19 +43,10 @@ class DataSourceNWCS(sources.DataSource):
self.dsmode = sources.DSMODE_NETWORK
def _get_data(self):
- LOG.info("Detecting if machine is a NWCS instance")
- on_nwcs = get_nwcs_data()
-
- if not on_nwcs:
- LOG.info("Machine is not a NWCS instance")
- return False
-
- LOG.info("Machine is a NWCS instance")
-
md = self.get_metadata()
if md is None:
- raise Exception("failed to get metadata")
+ raise RuntimeError("failed to get metadata")
self.metadata_full = md
@@ -111,7 +102,7 @@ class DataSourceNWCS(sources.DataSource):
return self._network_config
if not self.metadata["network"]["config"]:
- raise Exception("Unable to get metadata from server")
+ raise RuntimeError("Unable to get metadata from server")
# metadata sends interface names, but we dont want to use them
for i in self.metadata["network"]["config"]:
@@ -125,14 +116,9 @@ class DataSourceNWCS(sources.DataSource):
return self._network_config
-
-def get_nwcs_data():
- vendor_name = dmi.read_dmi_data("system-manufacturer")
-
- if vendor_name != "NWCS":
- return False
-
- return True
+ @staticmethod
+ def ds_detect():
+ return "NWCS" == dmi.read_dmi_data("system-manufacturer")
def get_interface_name(mac):
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index a9744fa1..bcb0927a 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -161,9 +161,6 @@ class OpenNebulaNetwork:
def mac2ip(self, mac):
return ".".join([str(int(c, 16)) for c in mac.split(":")[2:]])
- def mac2network(self, mac):
- return self.mac2ip(mac).rpartition(".")[0] + ".0"
-
def get_nameservers(self, dev):
nameservers = {}
dns = self.get_field(dev, "dns", "").split()
@@ -208,9 +205,6 @@ class OpenNebulaNetwork:
def get_mask(self, dev):
return self.get_field(dev, "mask", "255.255.255.0")
- def get_network(self, dev, mac):
- return self.get_field(dev, "network", self.mac2network(mac))
-
def get_field(self, dev, name, default=None):
"""return the field name in context for device dev.
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 86ed3dd5..c480b627 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -73,7 +73,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
return mstr
- def wait_for_metadata_service(self, max_wait=None, timeout=None):
+ def wait_for_metadata_service(self):
urls = self.ds_cfg.get("metadata_urls", DEF_MD_URLS)
filtered = [x for x in urls if util.is_resolvable_url(x)]
if set(filtered) != set(urls):
@@ -90,23 +90,16 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
md_urls = []
url2base = {}
for url in urls:
- # Wait for a specific openstack metadata url
md_url = url_helper.combine_url(url, "openstack")
md_urls.append(md_url)
url2base[md_url] = url
url_params = self.get_url_params()
- if max_wait is None:
- max_wait = url_params.max_wait_seconds
-
- if timeout is None:
- timeout = url_params.timeout_seconds
-
start_time = time.time()
avail_url, _response = url_helper.wait_for_url(
urls=md_urls,
- max_wait=max_wait,
- timeout=timeout,
+ max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds,
connect_synchronously=False,
)
if avail_url:
@@ -157,23 +150,11 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
False when unable to contact metadata service or when metadata
format is invalid or disabled.
"""
- oracle_considered = "Oracle" in self.sys_cfg.get("datasource_list")
if self.perform_dhcp_setup: # Setup networking in init-local stage.
try:
- with EphemeralDHCPv4(
- self.fallback_interface,
- tmp_dir=self.distro.get_tmp_exec_path(),
- ):
- if not self.detect_openstack(
- accept_oracle=not oracle_considered
- ):
- LOG.debug(
- "OpenStack datasource not running"
- " on OpenStack (dhcp)"
- )
- return False
+ with EphemeralDHCPv4(self.fallback_interface):
results = util.log_time(
logfunc=LOG.debug,
msg="Crawl of metadata service",
@@ -183,13 +164,6 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
util.logexc(LOG, str(e))
return False
else:
- if not self.detect_openstack(accept_oracle=not oracle_considered):
- LOG.debug(
- "OpenStack datasource not running"
- " on OpenStack (non-dhcp)"
- )
- return False
-
try:
results = self._crawl_metadata()
except sources.InvalidMetaDataException as e:
@@ -268,8 +242,9 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
raise sources.InvalidMetaDataException(msg) from e
return result
- def detect_openstack(self, accept_oracle=False):
+ def ds_detect(self):
"""Return True when a potential OpenStack platform is detected."""
+ accept_oracle = "Oracle" in self.sys_cfg.get("datasource_list")
if not util.is_x86():
# Non-Intel cpus don't properly report dmi product names
return True
@@ -283,13 +258,6 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
return True
elif util.get_proc_env(1).get("product_name") == DMI_PRODUCT_NOVA:
return True
- # On bare metal hardware, the product name is not set like
- # in a virtual OpenStack vm. We check if the system is virtual
- # and if the openstack specific metadata service has been found.
- elif not self.distro.is_virtual and self.wait_for_metadata_service(
- max_wait=15, timeout=5
- ):
- return True
return False
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
index 08daa4f6..3baf06e1 100644
--- a/cloudinit/sources/DataSourceOracle.py
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -118,9 +118,9 @@ class DataSourceOracle(sources.DataSource):
vendordata_pure = None
network_config_sources: Tuple[sources.NetworkConfigSource, ...] = (
sources.NetworkConfigSource.CMD_LINE,
+ sources.NetworkConfigSource.SYSTEM_CFG,
sources.NetworkConfigSource.DS,
sources.NetworkConfigSource.INITRAMFS,
- sources.NetworkConfigSource.SYSTEM_CFG,
)
_network_config: dict = {"config": [], "version": 1}
@@ -140,13 +140,12 @@ class DataSourceOracle(sources.DataSource):
def _has_network_config(self) -> bool:
return bool(self._network_config.get("config", []))
- def _is_platform_viable(self) -> bool:
+ @staticmethod
+ def ds_detect() -> bool:
"""Check platform environment to report if this datasource may run."""
return _is_platform_viable()
def _get_data(self):
- if not self._is_platform_viable():
- return False
self.system_uuid = _read_system_uuid()
@@ -156,7 +155,6 @@ class DataSourceOracle(sources.DataSource):
"url": METADATA_PATTERN.format(version=2, path="instance"),
"headers": V2_HEADERS,
},
- tmp_dir=self.distro.get_tmp_exec_path(),
)
fetch_primary_nic = not self._is_iscsi_root()
fetch_secondary_nics = self.ds_cfg.get(
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index 6983f275..f45f9b04 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -38,29 +38,6 @@ DEF_MD_RETRIES = 5
DEF_MD_TIMEOUT = 10
-def on_scaleway():
- """
- There are three ways to detect if you are on Scaleway:
-
- * check DMI data: not yet implemented by Scaleway, but the check is made to
- be future-proof.
- * the initrd created the file /var/run/scaleway.
- * "scaleway" is in the kernel cmdline.
- """
- vendor_name = dmi.read_dmi_data("system-manufacturer")
- if vendor_name == "Scaleway":
- return True
-
- if os.path.exists("/var/run/scaleway"):
- return True
-
- cmdline = util.get_cmdline()
- if "scaleway" in cmdline:
- return True
-
- return False
-
-
class SourceAddressAdapter(requests.adapters.HTTPAdapter):
"""
Adapter for requests to choose the local address to bind to.
@@ -203,17 +180,33 @@ class DataSourceScaleway(sources.DataSource):
"vendor-data", self.vendordata_address, self.retries, self.timeout
)
+ @staticmethod
+ def ds_detect():
+ """
+ There are three ways to detect if you are on Scaleway:
+
+ * check DMI data: not yet implemented by Scaleway, but the check is
+ made to be future-proof.
+ * the initrd created the file /var/run/scaleway.
+ * "scaleway" is in the kernel cmdline.
+ """
+ vendor_name = dmi.read_dmi_data("system-manufacturer")
+ if vendor_name == "Scaleway":
+ return True
+
+ if os.path.exists("/var/run/scaleway"):
+ return True
+
+ cmdline = util.get_cmdline()
+ if "scaleway" in cmdline:
+ return True
+
def _get_data(self):
- if not on_scaleway():
- return False
if self._fallback_interface is None:
self._fallback_interface = net.find_fallback_nic()
try:
- with EphemeralDHCPv4(
- self._fallback_interface,
- tmp_dir=self.distro.get_tmp_exec_path(),
- ):
+ with EphemeralDHCPv4(self._fallback_interface):
util.log_time(
logfunc=LOG.debug,
msg="Crawl of metadata service",
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 266daf68..41f6ec27 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -526,9 +526,6 @@ class JoyentMetadataClient:
).decode()
return self.request(rtype="PUT", param=param)
- def delete(self, key):
- return self.request(rtype="DELETE", param=key)
-
def close_transport(self):
if self.fp:
self.fp.close()
diff --git a/cloudinit/sources/DataSourceUpCloud.py b/cloudinit/sources/DataSourceUpCloud.py
index f114dad4..43122f0b 100644
--- a/cloudinit/sources/DataSourceUpCloud.py
+++ b/cloudinit/sources/DataSourceUpCloud.py
@@ -71,9 +71,7 @@ class DataSourceUpCloud(sources.DataSource):
LOG.debug("Finding a fallback NIC")
nic = cloudnet.find_fallback_nic()
LOG.debug("Discovering metadata via DHCP interface %s", nic)
- with EphemeralDHCPv4(
- nic, tmp_dir=self.distro.get_tmp_exec_path()
- ):
+ with EphemeralDHCPv4(nic):
md = util.log_time(
logfunc=LOG.debug,
msg="Reading from metadata service",
@@ -128,7 +126,9 @@ class DataSourceUpCloud(sources.DataSource):
raw_network_config = self.metadata.get("network")
if not raw_network_config:
- raise Exception("Unable to get network meta-data from server....")
+ raise RuntimeError(
+ "Unable to get network meta-data from server...."
+ )
self._network_config = uc_helper.convert_network_config(
raw_network_config,
diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py
index 9d7c84fb..f7c56780 100644
--- a/cloudinit/sources/DataSourceVultr.py
+++ b/cloudinit/sources/DataSourceVultr.py
@@ -37,12 +37,12 @@ class DataSourceVultr(sources.DataSource):
]
)
+ @staticmethod
+ def ds_detect():
+ return vultr.is_vultr()
+
# Initiate data and check if Vultr
def _get_data(self):
- LOG.debug("Detecting if machine is a Vultr instance")
- if not vultr.is_vultr():
- LOG.debug("Machine is not a Vultr instance")
- return False
LOG.debug("Machine is a Vultr instance")
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 12430401..2779cac4 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -13,6 +13,7 @@ import copy
import json
import os
import pickle
+import re
from collections import namedtuple
from enum import Enum, unique
from typing import Any, Dict, List, Optional, Tuple
@@ -307,6 +308,47 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
def __str__(self):
return type_utils.obj_name(self)
+ def ds_detect(self) -> bool:
+ """Check if running on this datasource"""
+ return True
+
+ def override_ds_detect(self) -> bool:
+ """Override if either:
+ - only a single datasource defined (nothing to fall back to)
+ - commandline argument is used (ci.ds=OpenStack)
+
+ Note: get_cmdline() is required for the general case - when ds-identify
+ does not run, _something_ needs to detect the kernel command line
+ definition.
+ """
+ if self.dsname == parse_cmdline():
+ LOG.debug(
+ "Machine is configured by the kernel commandline to run on "
+ "single datasource %s.",
+ self,
+ )
+ return True
+ elif self.sys_cfg.get("datasource_list", []) in (
+ [self.dsname],
+ [self.dsname, "None"],
+ ):
+ LOG.debug(
+ "Machine is configured to run on single datasource %s.", self
+ )
+ return True
+ return False
+
+ def _check_and_get_data(self):
+ """Overrides runtime datasource detection"""
+ if self.override_ds_detect():
+ return self._get_data()
+ elif self.ds_detect():
+ LOG.debug("Machine is running on %s.", self)
+ return self._get_data()
+ else:
+ LOG.debug("Datasource type %s is not detected.", self)
+ return False
+
def _get_standardized_metadata(self, instance_data):
"""Return a dictionary of standardized metadata keys."""
local_hostname = self.get_hostname().hostname
@@ -370,7 +412,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
Minimally, the datasource should return a boolean True on success.
"""
self._dirty_cache = True
- return_value = self._get_data()
+ return_value = self._check_and_get_data()
if not return_value:
return return_value
self.persist_instance_data()
@@ -868,10 +910,6 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
def network_config(self):
return None
- @property
- def first_instance_boot(self):
- return
-
def setup(self, is_new_instance):
"""setup(is_new_instance)
@@ -1111,4 +1149,13 @@ def pkl_load(fname: str) -> Optional[DataSource]:
return None
-# vi: ts=4 expandtab
+def parse_cmdline():
+ """Check if command line argument for this datasource was passed
+ Passing by command line overrides runtime datasource detection
+ """
+ cmdline = util.get_cmdline()
+ ds_parse_1 = re.search(r"ci\.ds=([a-zA-Z]+)(\s|$)", cmdline)
+ ds_parse_2 = re.search(r"ci\.datasource=([a-zA-Z]+)(\s|$)", cmdline)
+ ds = ds_parse_1 or ds_parse_2
+ if ds:
+ return ds.group(1)
diff --git a/cloudinit/sources/azure/imds.py b/cloudinit/sources/azure/imds.py
index 54fc9a05..5e4046d0 100644
--- a/cloudinit/sources/azure/imds.py
+++ b/cloudinit/sources/azure/imds.py
@@ -2,7 +2,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import functools
from typing import Dict
import requests
@@ -10,25 +9,69 @@ import requests
from cloudinit import log as logging
from cloudinit import util
from cloudinit.sources.helpers.azure import report_diagnostic_event
-from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
+from cloudinit.url_helper import UrlError, readurl
LOG = logging.getLogger(__name__)
IMDS_URL = "http://169.254.169.254/metadata"
-_readurl_exception_callback = functools.partial(
- retry_on_url_exc,
- retry_codes=(
- 404, # not found (yet)
- 410, # gone / unavailable (yet)
- 429, # rate-limited/throttled
- 500, # server error
- ),
- retry_instances=(
- requests.ConnectionError,
- requests.Timeout,
- ),
-)
+
+class ReadUrlRetryHandler:
+ def __init__(
+ self,
+ *,
+ retry_codes=(
+ 404, # not found (yet)
+ 410, # gone / unavailable (yet)
+ 429, # rate-limited/throttled
+ 500, # server error
+ ),
+ max_connection_errors: int = 10,
+ logging_backoff: float = 1.0,
+ ) -> None:
+ self.logging_backoff = logging_backoff
+ self.max_connection_errors = max_connection_errors
+ self.retry_codes = retry_codes
+ self._logging_threshold = 1.0
+ self._request_count = 0
+
+ def exception_callback(self, req_args, exception) -> bool:
+ self._request_count += 1
+ if not isinstance(exception, UrlError):
+ report_diagnostic_event(
+ "Polling IMDS failed with unexpected exception: %r"
+ % (exception),
+ logger_func=LOG.warning,
+ )
+ return False
+
+ log = True
+ retry = True
+
+ # Check for connection errors which may occur early boot, but
+ # are otherwise indicative that we are not connecting with the
+ # primary NIC.
+ if isinstance(
+ exception.cause, (requests.ConnectionError, requests.Timeout)
+ ):
+ self.max_connection_errors -= 1
+ if self.max_connection_errors < 0:
+ retry = False
+ elif exception.code not in self.retry_codes:
+ retry = False
+
+ if self._request_count >= self._logging_threshold:
+ self._logging_threshold *= self.logging_backoff
+ else:
+ log = False
+
+ if log or not retry:
+ report_diagnostic_event(
+ "Polling IMDS failed attempt %d with exception: %r"
+ % (self._request_count, exception),
+ logger_func=LOG.info,
+ )
+ return retry
def _fetch_url(
@@ -38,11 +81,12 @@ def _fetch_url(
:raises UrlError: on error fetching metadata.
"""
+ handler = ReadUrlRetryHandler()
try:
response = readurl(
url,
- exception_cb=_readurl_exception_callback,
+ exception_cb=handler.exception_callback,
headers={"Metadata": "true"},
infinite=False,
log_req_resp=log_response,
@@ -61,13 +105,14 @@ def _fetch_url(
def _fetch_metadata(
url: str,
+ retries: int = 10,
) -> Dict:
"""Fetch IMDS metadata.
:raises UrlError: on error fetching metadata.
:raises ValueError: on error parsing metadata.
"""
- metadata = _fetch_url(url)
+ metadata = _fetch_url(url, retries=retries)
try:
return util.load_json(metadata)
@@ -79,7 +124,7 @@ def _fetch_metadata(
raise
-def fetch_metadata_with_api_fallback() -> Dict:
+def fetch_metadata_with_api_fallback(retries: int = 10) -> Dict:
"""Fetch extended metadata, falling back to non-extended as required.
:raises UrlError: on error fetching metadata.
@@ -87,7 +132,7 @@ def fetch_metadata_with_api_fallback() -> Dict:
"""
try:
url = IMDS_URL + "/instance?api-version=2021-08-01&extended=true"
- return _fetch_metadata(url)
+ return _fetch_metadata(url, retries=retries)
except UrlError as error:
if error.code == 400:
report_diagnostic_event(
@@ -95,7 +140,7 @@ def fetch_metadata_with_api_fallback() -> Dict:
logger_func=LOG.warning,
)
url = IMDS_URL + "/instance?api-version=2019-06-01"
- return _fetch_metadata(url)
+ return _fetch_metadata(url, retries=retries)
raise
@@ -106,43 +151,17 @@ def fetch_reprovision_data() -> bytes:
"""
url = IMDS_URL + "/reprovisiondata?api-version=2019-06-01"
- logging_threshold = 1
- poll_counter = 0
-
- def exception_callback(msg, exception):
- nonlocal logging_threshold
- nonlocal poll_counter
-
- poll_counter += 1
- if not isinstance(exception, UrlError):
- report_diagnostic_event(
- "Polling IMDS failed with unexpected exception: %r"
- % (exception),
- logger_func=LOG.warning,
- )
- return False
-
- log = True
- retry = False
- if exception.code in (404, 410):
- retry = True
- if poll_counter >= logging_threshold:
- # Exponential back-off on logging.
- logging_threshold *= 2
- else:
- log = False
-
- if log:
- report_diagnostic_event(
- "Polling IMDS failed with exception: %r count: %d"
- % (exception, poll_counter),
- logger_func=LOG.info,
- )
- return retry
-
+ handler = ReadUrlRetryHandler(
+ logging_backoff=2.0,
+ max_connection_errors=0,
+ retry_codes=(
+ 404,
+ 410,
+ ),
+ )
response = readurl(
url,
- exception_cb=exception_callback,
+ exception_cb=handler.exception_callback,
headers={"Metadata": "true"},
infinite=True,
log_req_resp=False,
@@ -150,7 +169,7 @@ def fetch_reprovision_data() -> bytes:
)
report_diagnostic_event(
- f"Polled IMDS {poll_counter+1} time(s)",
+ f"Polled IMDS {handler._request_count+1} time(s)",
logger_func=LOG.debug,
)
return response.contents
diff --git a/cloudinit/sources/helpers/cloudsigma.py b/cloudinit/sources/helpers/cloudsigma.py
index 5d39946f..1d6a1b45 100644
--- a/cloudinit/sources/helpers/cloudsigma.py
+++ b/cloudinit/sources/helpers/cloudsigma.py
@@ -53,10 +53,6 @@ class Cepko:
request_pattern = self.request_pattern.format("/meta/{}")
return self.get(key, request_pattern)
- def global_context(self, key=""):
- request_pattern = self.request_pattern.format("/global_context/{}")
- return self.get(key, request_pattern)
-
class CepkoResult:
"""
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index df9e5c4b..a129d9a8 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -50,11 +50,6 @@ class Config:
return self._configFile.get(Config.TIMEZONE, None)
@property
- def utc(self):
- """Retrieves whether to set time to UTC or Local."""
- return self._configFile.get(Config.UTC, None)
-
- @property
def admin_password(self):
"""Return the root password to be set."""
return self._configFile.get(Config.PASS, None)
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index 37185cba..9f868389 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -69,39 +69,6 @@ class ConfigFile(ConfigSource, dict):
for (key, value) in config.items(category):
self._insertKey(category + "|" + key, value)
- def should_keep_current_value(self, key):
- """
- Determines whether a value for a property must be kept.
-
- If the property is missing, it is treated as it should be not
- changed by the engine.
-
- Keyword arguments:
- key -- The key to search for.
- """
- # helps to distinguish from "empty" value which is used to indicate
- # "removal"
- return key not in self
-
- def should_remove_current_value(self, key):
- """
- Determines whether a value for the property must be removed.
-
- If the specified key is empty, it is treated as it should be
- removed by the engine.
-
- Return true if the value can be removed, false otherwise.
-
- Keyword arguments:
- key -- The key to search for.
- """
- # helps to distinguish from "missing" value which is used to indicate
- # "keeping unchanged"
- if key in self:
- return not bool(self[key])
- else:
- return False
-
def get_count_with_prefix(self, prefix):
"""
Return the total count of keys that start with the specified prefix.
@@ -110,6 +77,3 @@ class ConfigFile(ConfigSource, dict):
prefix -- prefix of the key
"""
return len([key for key in self if key.startswith(prefix)])
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
deleted file mode 100644
index d44f4c01..00000000
--- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.sources.helpers.vmware.imc.config_source import ConfigSource
-
-
-class ConfigNamespace(ConfigSource):
- """Specifies the Config Namespace."""
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 7b9e0974..ba2488be 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -62,7 +62,7 @@ class NicConfigurator:
if not primary_nics:
return None
elif len(primary_nics) > 1:
- raise Exception(
+ raise RuntimeError(
"There can only be one primary nic",
[nic.mac for nic in primary_nics],
)
@@ -230,16 +230,6 @@ class NicConfigurator:
return (subnet_list, route_list)
- def _genIpv6Route(self, name, nic, addrs):
- route_list = []
-
- for addr in addrs:
- route_list.append(
- {"type": "route", "gateway": addr.gateway, "metric": 10000}
- )
-
- return route_list
-
def generate(self, configure=False, osfamily=None):
"""Return the config elements that are needed to configure the nics"""
if configure:
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index 6ffbae40..92e71370 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -1,5 +1,5 @@
# Copyright (C) 2016 Canonical Ltd.
-# Copyright (C) 2016-2022 VMware Inc.
+# Copyright (C) 2016-2023 VMware Inc.
#
# Author: Sankar Tanguturi <stanguturi@vmware.com>
# Pengpeng Sun <pegnpengs@vmware.com>
@@ -11,14 +11,10 @@ import os
import re
import time
-from cloudinit import subp, util
+from cloudinit import safeyaml, subp, util
from .config import Config
-from .config_custom_script import (
- CustomScriptNotFound,
- PostCustomScript,
- PreCustomScript,
-)
+from .config_custom_script import PostCustomScript, PreCustomScript
from .config_file import ConfigFile
from .config_nic import NicConfigurator
from .config_passwd import PasswordConfigurator
@@ -265,6 +261,17 @@ def get_data_from_imc_raw_data_cust_cfg(cust_cfg):
)
return (None, None, None)
+ try:
+ logger.debug("Validating if meta data is valid or not")
+ md = safeyaml.load(md)
+ except safeyaml.YAMLError as e:
+ set_cust_error_status(
+ "Error parsing the cloud-init meta data",
+ str(e),
+ GuestCustErrorEnum.GUESTCUST_ERROR_WRONG_META_FORMAT,
+ cust_cfg,
+ )
+
ud_file = cust_cfg.user_data_name
if ud_file:
ud_path = os.path.join(get_imc_dir_path(), ud_file)
@@ -512,7 +519,7 @@ def do_pre_custom_script(cust_cfg, custom_script, cust_cfg_dir):
try:
precust = PreCustomScript(custom_script, cust_cfg_dir)
precust.execute()
- except CustomScriptNotFound as e:
+ except Exception as e:
set_cust_error_status(
"Error executing pre-customization script",
str(e),
@@ -527,7 +534,7 @@ def do_post_custom_script(cust_cfg, custom_script, cust_cfg_dir, ccScriptsDir):
try:
postcust = PostCustomScript(custom_script, cust_cfg_dir, ccScriptsDir)
postcust.execute()
- except CustomScriptNotFound as e:
+ except Exception as e:
set_cust_error_status(
"Error executing post-customization script",
str(e),
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
deleted file mode 100644
index f290a36f..00000000
--- a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-
-class Ipv4ModeEnum:
- """
- The IPv4 configuration mode which directly represents the user's goal.
-
- This mode effectively acts as a contract of the in-guest customization
- engine. It must be set based on what the user has requested and should
- not be changed by those layers. It's up to the in-guest engine to
- interpret and materialize the user's request.
- """
-
- # The legacy mode which only allows dhcp/static based on whether IPv4
- # addresses list is empty or not
- IPV4_MODE_BACKWARDS_COMPATIBLE = "BACKWARDS_COMPATIBLE"
-
- # IPv4 must use static address. Reserved for future use
- IPV4_MODE_STATIC = "STATIC"
-
- # IPv4 must use DHCPv4. Reserved for future use
- IPV4_MODE_DHCP = "DHCP"
-
- # IPv4 must be disabled
- IPV4_MODE_DISABLED = "DISABLED"
-
- # IPv4 settings should be left untouched. Reserved for future use
- IPV4_MODE_AS_IS = "AS_IS"
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py
index 836108d4..a6d5cea7 100644
--- a/cloudinit/sources/helpers/vultr.py
+++ b/cloudinit/sources/helpers/vultr.py
@@ -26,9 +26,7 @@ def get_metadata(url, timeout, retries, sec_between, agent, tmp_dir=None):
for iface in get_interface_list():
try:
with EphemeralDHCPv4(
- iface=iface,
- connectivity_url_data={"url": url},
- tmp_dir=tmp_dir,
+ iface=iface, connectivity_url_data={"url": url}
):
# Check for the metadata route, skip if not there
if not check_route(url):
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 9494a0bf..65f952e7 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -535,9 +535,6 @@ class Init:
]
return def_handlers
- def _default_userdata_handlers(self):
- return self._default_handlers()
-
def _default_vendordata_handlers(self):
return self._default_handlers(
opts={
@@ -758,10 +755,11 @@ class Init:
return
if isinstance(enabled, str):
- LOG.debug(
- "Use of string '%s' for 'vendor_data:enabled' field "
- "is deprecated. Use boolean value instead",
- enabled,
+ util.deprecate(
+ deprecated=f"Use of string '{enabled}' for "
+ "'vendor_data:enabled' field",
+ deprecated_version="23.1",
+ extra_message="Use boolean value instead.",
)
LOG.debug(
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index 4d712829..ed6b9063 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -15,14 +15,17 @@
import collections
import re
import sys
-from typing import Type
+from typing import Any
from cloudinit import log as logging
from cloudinit import type_utils as tu
from cloudinit import util
from cloudinit.atomic_helper import write_file
-JUndefined: Type
+# After bionic EOL, mypy==1.0.0 will be able to type-analyse dynamic
+# base types, substitute this by:
+# JUndefined: typing.Type
+JUndefined: Any
try:
from jinja2 import DebugUndefined as _DebugUndefined
from jinja2 import Template as JTemplate
@@ -41,7 +44,7 @@ MISSING_JINJA_PREFIX = "CI_MISSING_JINJA_VAR/"
# Mypy, and the PEP 484 ecosystem in general, does not support creating
# classes with dynamic base types: https://stackoverflow.com/a/59636248
-class UndefinedJinjaVariable(JUndefined): # type: ignore
+class UndefinedJinjaVariable(JUndefined):
"""Class used to represent any undefined jinja template variable."""
def __str__(self):
@@ -149,12 +152,6 @@ def render_to_file(fn, outfn, params, mode=0o644):
util.write_file(outfn, contents, mode=mode)
-def render_string_to_file(content, outfn, params, mode=0o644):
- """Render string"""
- contents = render_string(content, params)
- util.write_file(outfn, contents, mode=mode)
-
-
def render_string(content, params):
"""Render string"""
if not params:
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 3336b23d..5374ec8a 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -69,7 +69,7 @@ def _set_filename(msg, filename):
def _handle_error(error_message, source_exception=None):
if features.ERROR_ON_USER_DATA_FAILURE:
- raise Exception(error_message) from source_exception
+ raise RuntimeError(error_message) from source_exception
else:
LOG.warning(error_message)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 8ba3e2b6..fc777b82 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -11,6 +11,7 @@
import contextlib
import copy as obj_copy
import email
+import functools
import glob
import grp
import gzip
@@ -33,16 +34,18 @@ import sys
import time
from base64 import b64decode, b64encode
from collections import deque, namedtuple
+from contextlib import suppress
from errno import EACCES, ENOENT
from functools import lru_cache, total_ordering
from pathlib import Path
-from typing import Callable, Deque, Dict, List, TypeVar
+from typing import Callable, Deque, Dict, List, Optional, TypeVar
from urllib import parse
from cloudinit import features, importer
from cloudinit import log as logging
from cloudinit import (
mergers,
+ net,
safeyaml,
subp,
temp_utils,
@@ -1232,8 +1235,8 @@ def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
return fqdn
-def is_resolvable(name):
- """determine if a url is resolvable, return a boolean
+def is_resolvable(url) -> bool:
+ """determine if a url's network address is resolvable, return a boolean
This also attempts to be resilient against dns redirection.
Note, that normal nsswitch resolution is used here. So in order
@@ -1245,6 +1248,8 @@ def is_resolvable(name):
be resolved inside the search list.
"""
global _DNS_REDIRECT_IP
+ parsed_url = parse.urlparse(url)
+ name = parsed_url.hostname
if _DNS_REDIRECT_IP is None:
badips = set()
badnames = (
@@ -1252,7 +1257,7 @@ def is_resolvable(name):
"example.invalid.",
"__cloud_init_expected_not_found__",
)
- badresults = {}
+ badresults: dict = {}
for iname in badnames:
try:
result = socket.getaddrinfo(
@@ -1269,12 +1274,14 @@ def is_resolvable(name):
LOG.debug("detected dns redirection: %s", badresults)
try:
+ # ip addresses need no resolution
+ with suppress(ValueError):
+ if net.is_ip_address(parsed_url.netloc.strip("[]")):
+ return True
result = socket.getaddrinfo(name, None)
# check first result's sockaddr field
addr = result[0][4][0]
- if addr in _DNS_REDIRECT_IP:
- return False
- return True
+ return addr not in _DNS_REDIRECT_IP
except (socket.gaierror, socket.error):
return False
@@ -1297,7 +1304,7 @@ def is_resolvable_url(url):
logfunc=LOG.debug,
msg="Resolving URL: " + url,
func=is_resolvable,
- args=(parse.urlparse(url).hostname,),
+ args=(url,),
)
@@ -1516,12 +1523,6 @@ def blkid(devs=None, disable_cache=False):
return ret
-def peek_file(fname, max_bytes):
- LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes)
- with open(fname, "rb") as ifh:
- return ifh.read(max_bytes)
-
-
def uniq_list(in_list):
out_list = []
for i in in_list:
@@ -3097,3 +3098,84 @@ class Version(namedtuple("Version", ["major", "minor", "patch", "rev"])):
if self.rev > other.rev:
return 1
return -1
+
+
+def deprecate(
+ *,
+ deprecated: str,
+ deprecated_version: str,
+ extra_message: Optional[str] = None,
+ schedule: int = 5,
+):
+ """Mark a "thing" as deprecated. Deduplicated deprecations are
+ logged.
+
+ @param deprecated: Noun to be deprecated. Write this as the start
+ of a sentence, with no period. Version and extra message will
+ be appended.
+ @param deprecated_version: The version in which the thing was
+ deprecated
+ @param extra_message: A remedy for the user's problem. A good
+ message will be actionable and specific (i.e., don't use a
+ generic "Use updated key." if the user used a deprecated key).
+ End the string with a period.
+ @param schedule: Manually set the deprecation schedule. Defaults to
+ 5 years. Leave a comment explaining your reason for deviation if
+ setting this value.
+
+ Note: uses keyword-only arguments to improve legibility
+ """
+ if not hasattr(deprecate, "_log"):
+ deprecate._log = set() # type: ignore
+ message = extra_message or ""
+ dedup = hash(deprecated + message + deprecated_version + str(schedule))
+ version = Version.from_str(deprecated_version)
+ version_removed = Version(version.major + schedule, version.minor)
+ if dedup not in deprecate._log: # type: ignore
+ deprecate._log.add(dedup) # type: ignore
+ deprecate_msg = (
+ f"{deprecated} is deprecated in "
+ f"{deprecated_version} and scheduled to be removed in "
+ f"{version_removed}. {message}"
+ ).rstrip()
+ if hasattr(LOG, "deprecated"):
+ LOG.deprecated(deprecate_msg)
+ else:
+ LOG.warning(deprecate_msg)
+
+
+def deprecate_call(
+ *, deprecated_version: str, extra_message: str, schedule: int = 5
+):
+ """Mark a "thing" as deprecated. Deduplicated deprecations are
+ logged.
+
+ @param deprecated_version: The version in which the thing was
+ deprecated
+ @param extra_message: A remedy for the user's problem. A good
+ message will be actionable and specific (i.e., don't use a
+ generic "Use updated key." if the user used a deprecated key).
+ End the string with a period.
+ @param schedule: Manually set the deprecation schedule. Defaults to
+ 5 years. Leave a comment explaining your reason for deviation if
+ setting this value.
+
+ Note: uses keyword-only arguments to improve legibility
+ """
+
+ def wrapper(func):
+ @functools.wraps(func)
+ def decorator(*args, **kwargs):
+ # don't log message multiple times
+ out = func(*args, **kwargs)
+ deprecate(
+ deprecated_version=deprecated_version,
+ deprecated=func.__name__,
+ extra_message=extra_message,
+ schedule=schedule,
+ )
+ return out
+
+ return decorator
+
+ return wrapper
diff --git a/cloudinit/version.py b/cloudinit/version.py
index fd83ebf6..a9bfffed 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "23.1"
+__VERSION__ = "23.1.1"
_PACKAGED_VERSION = "@@PACKAGED_VERSION@@"
FEATURES = [
diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt
index 9b5df6b0..43b34418 100644
--- a/doc/examples/cloud-config-datasources.txt
+++ b/doc/examples/cloud-config-datasources.txt
@@ -16,11 +16,6 @@ datasource:
- http://169.254.169.254:80
- http://instance-data:8773
- OpenStack:
- # The default list of metadata services to check for OpenStack.
- metadata_urls:
- - http://169.254.169.254
-
MAAS:
timeout : 50
max_wait : 120
diff --git a/doc/man/cloud-init.1 b/doc/man/cloud-init.1
index 388617de..beca40c6 100644
--- a/doc/man/cloud-init.1
+++ b/doc/man/cloud-init.1
@@ -58,10 +58,6 @@ Remove logs and artifacts so cloud-init can re-run.
Run development tools. See help output for subcommand details.
.TP
-.B "dhclient-hook"
-Run the dhclient hook to record network info.
-
-.TP
.B "features"
List defined features.
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index a4103a7e..c8d460ab 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -1,3 +1,4 @@
+import datetime
import os
import sys
@@ -18,7 +19,7 @@ sys.path.insert(0, os.path.abspath("."))
# General information about the project.
project = "cloud-init"
-copyright = "Canonical Ltd."
+copyright = f"Canonical Group Ltd, {datetime.date.today().year}"
# -- General configuration ----------------------------------------------------
@@ -71,15 +72,46 @@ copybutton_only_copy_prompt_lines = False
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "furo"
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-html_static_path = ["static"]
html_theme_options = {
"light_logo": "logo.png",
"dark_logo": "logo-dark-mode.png",
+ "light_css_variables": {
+ "font-stack": "Ubuntu, -apple-system, Segoe UI, Roboto, Oxygen, Cantarell, Fira Sans, Droid Sans, Helvetica Neue, sans-serif",
+ "font-stack--monospace": "Ubuntu Mono variable, Ubuntu Mono, Consolas, Monaco, Courier, monospace",
+ "color-foreground-primary": "#111",
+ "color-foreground-secondary": "var(--color-foreground-primary)",
+ "color-foreground-muted": "#333",
+ "color-background-secondary": "#FFF",
+ "color-background-hover": "#f2f2f2",
+ "color-brand-primary": "#111",
+ "color-brand-content": "#06C",
+ "color-inline-code-background": "rgba(0,0,0,.03)",
+ "color-sidebar-link-text": "#111",
+ "color-sidebar-item-background--current": "#ebebeb",
+ "color-sidebar-item-background--hover": "#f2f2f2",
+ "sidebar-item-line-height": "1.3rem",
+ "color-link-underline": "var(--color-background-primary)",
+ "color-link-underline--hover": "var(--color-background-primary)",
+ },
+ "dark_css_variables": {
+ "color-foreground-secondary": "var(--color-foreground-primary)",
+ "color-foreground-muted": "#CDCDCD",
+ "color-background-secondary": "var(--color-background-primary)",
+ "color-background-hover": "#666",
+ "color-brand-primary": "#fff",
+ "color-brand-content": "#06C",
+ "color-sidebar-link-text": "#f7f7f7",
+ "color-sidebar-item-background--current": "#666",
+ "color-sidebar-item-background--hover": "#333",
+ },
}
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+html_static_path = ["static"]
+html_css_files = ["css/custom.css"]
+
html_extra_path = ["googleaf254801a5285c31.html"]
# Make sure the target is unique
diff --git a/doc/rtd/development/module_creation.rst b/doc/rtd/development/module_creation.rst
index cb46d7cf..9c873cca 100644
--- a/doc/rtd/development/module_creation.rst
+++ b/doc/rtd/development/module_creation.rst
@@ -15,8 +15,7 @@ Example
# This file is part of cloud-init. See LICENSE file for license information.
"""Example Module: Shows how to create a module"""
- from logging import Logger
-
+ import logging
from cloudinit.cloud import Cloud
from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
@@ -29,6 +28,8 @@ Example
This will likely take multiple lines.
"""
+ LOG = logging.getLogger(__name__)
+
meta: MetaSchema = {
"id": "cc_example",
"name": "Example Module",
@@ -47,9 +48,9 @@ Example
def handle(
- name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+ name: str, cfg: Config, cloud: Cloud, args: list
) -> None:
- log.debug(f"Hi from module {name}")
+ LOG.debug(f"Hi from module {name}")
.. _module_creation-Guidelines:
@@ -66,7 +67,6 @@ Guidelines
* ``cloud``: A cloud object that can be used to access various datasource
and paths for the given distro and data provided by the various datasource
instance types.
- * ``log``: A logger object that can be used to log messages.
* ``args``: An argument list. This is usually empty and is only populated
if the module is called independently from the command line or if the
module definition in :file:`/etc/cloud/cloud.cfg[.d]` has been modified
diff --git a/doc/rtd/explanation/instancedata.rst b/doc/rtd/explanation/instancedata.rst
index 8f5b310a..12c53853 100644
--- a/doc/rtd/explanation/instancedata.rst
+++ b/doc/rtd/explanation/instancedata.rst
@@ -112,7 +112,7 @@ Example: User data script with ``instance-data``
## template: jinja
#!/bin/bash
{% if v1.region == 'us-east-2' -%}
- echo 'Installing custom proxies for {{ v1.region }}
+ echo 'Installing custom proxies for {{ v1.region }}'
sudo apt-get install my-xtra-fast-stack
{%- endif %}
...
@@ -343,7 +343,7 @@ used. The format of subplatform will be:
Example output:
- - metadata (http://168.254.169.254)
+ - metadata (http://169.254.169.254)
- seed-dir (/path/to/seed-dir/)
- config-disk (/dev/cd0)
- configdrive (/dev/sr0)
diff --git a/doc/rtd/reference/cli.rst b/doc/rtd/reference/cli.rst
index 246b9721..75beb8ec 100644
--- a/doc/rtd/reference/cli.rst
+++ b/doc/rtd/reference/cli.rst
@@ -16,7 +16,7 @@ Example output:
.. code-block::
usage: cloud-init [-h] [--version] [--file FILES] [--debug] [--force]
- {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status,schema} ...
+ {init,modules,single,query,features,analyze,devel,collect-logs,clean,status,schema} ...
options:
-h, --help show this help message and exit
@@ -27,12 +27,11 @@ Example output:
--force Force running even if no datasource is found (use at your own risk).
Subcommands:
- {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status,schema}
+ {init,modules,single,query,features,analyze,devel,collect-logs,clean,status,schema}
init Initialize cloud-init and perform initial modules.
modules Activate modules using a given configuration key.
single Run a single module.
query Query standardized instance metadata from the command line.
- dhclient-hook Run the dhclient hook to record network info.
features List defined features.
analyze Devel tool: Analyze cloud-init logs and data.
devel Run development tools.
diff --git a/doc/rtd/reference/faq.rst b/doc/rtd/reference/faq.rst
index 87aade59..ba741741 100644
--- a/doc/rtd/reference/faq.rst
+++ b/doc/rtd/reference/faq.rst
@@ -235,6 +235,20 @@ to their respective support channels. For Subiquity autoinstaller that is via
IRC (``#ubuntu-server`` on Libera) or Discourse. For Juju support see their
`discourse page`_.
+
+Can I use cloud-init as a library?
+==================================
+Yes, in fact some projects `already do`_. However, ``cloud-init`` does not
+currently make any API guarantees to external consumers - current library
+users are projects that have close contact with ``cloud-init``, which is why
+this model currently works.
+
+It is worth mentioning for library users that ``cloud-init`` defines a custom
+log level. This log level, ``35``, is dedicated to logging info
+related to deprecation information. Users of ``cloud-init`` as a library
+may wish to ensure that this log level doesn't collide with external
+libraries that define their own custom log levels.
+
Where can I learn more?
=======================
@@ -279,6 +293,7 @@ Whitepapers:
.. _validate-yaml.py: https://github.com/canonical/cloud-init/blob/main/tools/validate-yaml.py
.. _Juju: https://ubuntu.com/blog/topics/juju
.. _discourse page: https://discourse.charmhub.io
+.. _already do: https://github.com/canonical/ubuntu-advantage-client/blob/9b46480b9e4b88e918bac5ced0d4b8edb3cbbeab/lib/auto_attach.py#L35
.. _cloud-init - The Good Parts: https://www.youtube.com/watch?v=2_m6EUo6VOI
.. _Utilising cloud-init on Microsoft Azure (Whitepaper): https://ubuntu.com/engage/azure-cloud-init-whitepaper
diff --git a/doc/rtd/static/css/custom.css b/doc/rtd/static/css/custom.css
new file mode 100644
index 00000000..a6a1f527
--- /dev/null
+++ b/doc/rtd/static/css/custom.css
@@ -0,0 +1,248 @@
+/** Fix the font weight (300 for normal, 400 for slightly bold) **/
+/** Should be 200 for all headers, 300 for normal text **/
+
+h1, h2, h3, h4, h5, h6, .sidebar-tree .current-page>.reference, button, input, optgroup, select, textarea, th.head {
+ font-weight: 200;
+}
+
+.toc-title {
+ font-weight: 400;
+}
+
+div.page, li.scroll-current>.reference, dl.glossary dt, dl.simple dt, dl:not([class]) dt {
+ font-weight: 300;
+ line-height: 1.5;
+ font-size: var(--font-size--normal);
+}
+
+/** Semantic markup styling **/
+strong.command {
+ font-family: var(--font-stack--monospace);
+ font-size: var(--font-size--medium);
+ background: var(--color-inline-code-background);
+ padding: 0.1em 0.2em;
+}
+
+/** Side bars (side-bar tree = left, toc-tree = right) **/
+div.sidebar-tree {
+ font-weight: 200;
+ line-height: 1.5;
+ font-size: var(--font-size--normal);
+}
+
+div.toc-tree {
+ font-weight: 200;
+ font-size: var(--font-size--medium);
+ line-height: 1.5;
+}
+
+.sidebar-tree .toctree-l1>.reference, .toc-tree li.scroll-current>.reference {
+ font-weight: 400;
+}
+
+/** List styling **/
+ol, ul {
+ margin-bottom: 1.5rem;
+ margin-left: 1rem;
+ margin-top: 0;
+ padding-left: 1rem;
+}
+
+/** Table styling **/
+
+th.head {
+ text-transform: uppercase;
+ font-size: var(--font-size--small);
+}
+
+table.docutils {
+ border: 0;
+ box-shadow: none;
+ width:100%;
+}
+
+table.docutils td, table.docutils th, table.docutils td:last-child, table.docutils th:last-child, table.docutils td:first-child, table.docutils th:first-child {
+ border-right: none;
+ border-left: none;
+}
+
+/* center align table cells with ":-:" */
+td.text-center {
+ text-align: center;
+}
+
+/** No rounded corners **/
+
+.admonition, code.literal, .sphinx-tabs-tab, .sphinx-tabs-panel, .highlight {
+ border-radius: 0;
+}
+
+/** code blocks and literals **/
+code.docutils.literal.notranslate, .highlight pre, pre.literal-block {
+ font-size: var(--font-size--medium);
+ border: none;
+}
+
+
+/** Admonition styling **/
+
+.admonition {
+ font-size: var(--font-size--medium);
+ box-shadow: none;
+}
+
+/** Styling for links **/
+/* unvisited link */
+a:link {
+ color: #06c;
+ text-decoration: none;
+}
+
+/* visited link */
+a:visited {
+ color: #7d42b8;
+ text-decoration: none;
+}
+
+/* mouse over link */
+a:hover {
+ text-decoration: underline;
+}
+
+/* selected link */
+a:active {
+ text-decoration: underline;
+}
+
+a.sidebar-brand.centered {
+ text-decoration: none;
+}
+
+/** Color for the "copy link" symbol next to headings **/
+
+a.headerlink {
+ color: var(--color-brand-primary);
+}
+
+/** Line to the left of the current navigation entry **/
+
+.sidebar-tree li.current-page {
+ border-left: 2px solid var(--color-brand-primary);
+}
+
+/** Some tweaks for issue #16 **/
+
+[role="tablist"] {
+ border-bottom: 1px solid var(--color-sidebar-item-background--hover);
+}
+
+.sphinx-tabs-tab[aria-selected="true"] {
+ border: 0;
+ border-bottom: 2px solid var(--color-brand-primary);
+ background-color: var(--color-sidebar-item-background--current);
+ font-weight:300;
+}
+
+.sphinx-tabs-tab{
+ color: var(--color-brand-primary);
+ font-weight:300;
+}
+
+.sphinx-tabs-panel {
+ border: 0;
+ border-bottom: 1px solid var(--color-sidebar-item-background--hover);
+ background: var(--color-background-primary);
+}
+
+button.sphinx-tabs-tab:hover {
+ background-color: var(--color-sidebar-item-background--hover);
+}
+
+/** Custom classes to fix scrolling in tables by decreasing the
+ font size or breaking certain columns.
+ Specify the classes in the Markdown file with, for example:
+ ```{rst-class} break-col-4 min-width-4-8
+ ```
+**/
+
+table.dec-font-size {
+ font-size: smaller;
+}
+table.break-col-1 td.text-left:first-child {
+ word-break: break-word;
+}
+table.break-col-4 td.text-left:nth-child(4) {
+ word-break: break-word;
+}
+table.min-width-1-15 td.text-left:first-child {
+ min-width: 15em;
+}
+table.min-width-4-8 td.text-left:nth-child(4) {
+ min-width: 8em;
+}
+
+/** Underline for abbreviations **/
+
+abbr[title] {
+ text-decoration: underline solid #cdcdcd;
+}
+
+/** Use the same style for right-details as for left-details **/
+.bottom-of-page .right-details {
+ font-size: var(--font-size--small);
+ display: block;
+}
+
+/** Version switcher */
+button.version_select {
+ color: var(--color-foreground-primary);
+ background-color: var(--color-toc-background);
+ padding: 5px 10px;
+ border: none;
+}
+
+.version_select:hover, .version_select:focus {
+ background-color: var(--color-sidebar-item-background--hover);
+}
+
+.version_dropdown {
+ position: relative;
+ display: inline-block;
+ text-align: right;
+ font-size: var(--sidebar-item-font-size);
+}
+
+.available_versions {
+ display: none;
+ position: absolute;
+ right: 0px;
+ background-color: var(--color-toc-background);
+ box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);
+ z-index: 11;
+}
+
+.available_versions a {
+ color: var(--color-foreground-primary);
+ padding: 12px 16px;
+ text-decoration: none;
+ display: block;
+}
+
+.available_versions a:hover {background-color: var(--color-sidebar-item-background--current)}
+
+.show {display:block;}
+
+/** Fix for nested numbered list - the nested list is lettered **/
+ol.arabic ol.arabic {
+ list-style: lower-alpha;
+}
+
+/** Make expandable sections look like links **/
+details summary {
+ color: var(--color-link);
+}
+
+/** Context links at the bottom of the page **/
+footer, .page-info .context {
+ font-size: var(--font-size--medium);
+}
diff --git a/packages/bddeb b/packages/bddeb
index 44d82a78..2eee6003 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -4,6 +4,7 @@ import argparse
import csv
import json
import os
+import re
import shutil
import subprocess
import sys
@@ -16,11 +17,17 @@ def find_root():
top_dir = os.environ.get("CLOUD_INIT_TOP_D", None)
if top_dir is None:
top_dir = os.path.dirname(
- os.path.dirname(os.path.abspath(sys.argv[0])))
- if os.path.isfile(os.path.join(top_dir, 'setup.py')):
+ os.path.dirname(os.path.abspath(sys.argv[0]))
+ )
+ if os.path.isfile(os.path.join(top_dir, "setup.py")):
return os.path.abspath(top_dir)
- raise OSError(("Unable to determine where your cloud-init topdir is."
- " set CLOUD_INIT_TOP_D?"))
+ raise OSError(
+ (
+ "Unable to determine where your cloud-init topdir is."
+ " set CLOUD_INIT_TOP_D?"
+ )
+ )
+
if "avoid-pep8-E402-import-not-top-of-file":
# Use the util functions from cloudinit
@@ -47,20 +54,24 @@ def get_release_suffix(release):
if os.path.exists(csv_path):
with open(csv_path, "r") as fp:
# version has "16.04 LTS" or "16.10", so drop "LTS" portion.
- rels = {row['series']: row['version'].replace(' LTS', '')
- for row in csv.DictReader(fp)}
+ rels = {
+ row["series"]: row["version"].replace(" LTS", "")
+ for row in csv.DictReader(fp)
+ }
if release in rels:
return "~%s.1" % rels[release]
elif release != UNRELEASED:
- print("missing distro-info-data package, unable to give "
- "per-release suffix.\n")
+ print(
+ "missing distro-info-data package, unable to give "
+ "per-release suffix.\n"
+ )
return ""
def run_helper(helper, args=None, strip=True):
if args is None:
args = []
- cmd = [util.abs_join(find_root(), 'tools', helper)] + args
+ cmd = [util.abs_join(find_root(), "tools", helper)] + args
(stdout, _stderr) = subp.subp(cmd)
if strip:
stdout = stdout.strip()
@@ -71,43 +82,56 @@ def write_debian_folder(root, templ_data, cloud_util_deps):
"""Create a debian package directory with all rendered template files."""
print("Creating a debian/ folder in %r" % (root))
- deb_dir = util.abs_join(root, 'debian')
+ deb_dir = util.abs_join(root, "debian")
# Just copy debian/ dir and then update files
- pdeb_d = util.abs_join(find_root(), 'packages', 'debian')
- subp.subp(['cp', '-a', pdeb_d, deb_dir])
+ pdeb_d = util.abs_join(find_root(), "packages", "debian")
+ subp.subp(["cp", "-a", pdeb_d, deb_dir])
# Fill in the change log template
- templater.render_to_file(util.abs_join(find_root(),
- 'packages', 'debian', 'changelog.in'),
- util.abs_join(deb_dir, 'changelog'),
- params=templ_data)
+ templater.render_to_file(
+ util.abs_join(find_root(), "packages", "debian", "changelog.in"),
+ util.abs_join(deb_dir, "changelog"),
+ params=templ_data,
+ )
# Write out the control file template
- reqs_output = run_helper(
- 'read-dependencies', args=['--distro', 'debian'])
+ reqs_output = run_helper("read-dependencies", args=["--distro", "debian"])
reqs = reqs_output.splitlines()
test_reqs = run_helper(
- 'read-dependencies',
- ['--requirements-file', 'test-requirements.txt',
- '--system-pkg-names']).splitlines()
+ "read-dependencies",
+ ["--requirements-file", "test-requirements.txt", "--system-pkg-names"],
+ ).splitlines()
- requires = ['cloud-utils | cloud-guest-utils'] if cloud_util_deps else []
+ requires = ["cloud-utils | cloud-guest-utils"] if cloud_util_deps else []
# We consolidate all deps as Build-Depends as our package build runs all
# tests so we need all runtime dependencies anyway.
# NOTE: python package was moved to the front after debuild -S would fail with
# 'Please add apropriate interpreter' errors (as in debian bug 861132)
- requires.extend(['python3'] + reqs + test_reqs)
- if templ_data['debian_release'] == 'xenial':
- requires.append('python3-pytest-catchlog')
- elif templ_data['debian_release'] in (
- 'buster', 'xenial', 'bionic', 'focal'
+ requires.extend(["python3"] + reqs + test_reqs)
+ if templ_data["debian_release"] in (
+ "buster",
+ "bionic",
+ "focal",
):
- requires.append('dh-systemd')
- templater.render_to_file(util.abs_join(find_root(),
- 'packages', 'debian', 'control.in'),
- util.abs_join(deb_dir, 'control'),
- params={'build_depends': ','.join(requires)})
+ requires.append("dh-systemd")
+ build_deps = ",".join(requires)
+ (stdout, _stderr) = subp.subp(
+ ["dpkg-query", "-W", "-f='${Provides}'", "debhelper"]
+ )
+ # Get latest debhelper-compat support on host
+ debhelper_matches = re.findall(r"(debhelper-compat \(= \d+\)),", stdout)
+ if debhelper_matches:
+ if templ_data["debian_release"] == "bionic":
+ # Bionic doesn't support debhelper-compat > 11
+ build_deps += ",debhelper-compat (= 11)"
+ else:
+ build_deps += f",{debhelper_matches[-1]}"
+ templater.render_to_file(
+ util.abs_join(find_root(), "packages", "debian", "control.in"),
+ util.abs_join(deb_dir, "control"),
+ params={"build_depends": build_deps},
+ )
def write_debian_folder_from_branch(root, templ_data, branch):
@@ -118,8 +142,7 @@ def write_debian_folder_from_branch(root, templ_data, branch):
["git", "archive", branch, "debian"], stdout=subprocess.PIPE
)
subprocess.check_call(
- ["tar", "-v", "-C", root, "-x"],
- stdin=p_dumpdeb.stdout
+ ["tar", "-v", "-C", root, "-x"], stdin=p_dumpdeb.stdout
)
print("Adding new entry to debian/changelog")
@@ -136,55 +159,83 @@ def write_debian_folder_from_branch(root, templ_data, branch):
"--controlmaint",
"Snapshot build.",
],
- cwd=root
+ cwd=root,
)
def read_version():
- return json.loads(run_helper('read-version', ['--json']))
+ return json.loads(run_helper("read-version", ["--json"]))
def get_parser():
"""Setup and return an argument parser for bdeb tool."""
parser = argparse.ArgumentParser()
- parser.add_argument("-v", "--verbose", dest="verbose",
- help=("run verbosely"
- " (default: %(default)s)"),
- default=False,
- action='store_true')
- parser.add_argument("--cloud-utils", dest="cloud_utils",
- help=("depend on cloud-utils package"
- " (default: %(default)s)"),
- default=False,
- action='store_true')
-
- parser.add_argument("--init-system", dest="init_system",
- help=("build deb with INIT_SYSTEM=xxx"
- " (default: %(default)s"),
- default=os.environ.get("INIT_SYSTEM", "systemd"))
-
- parser.add_argument("--release", dest="release",
- help=("build with changelog referencing RELEASE"),
- default=UNRELEASED)
+ parser.add_argument(
+ "-v",
+ "--verbose",
+ dest="verbose",
+ help=("run verbosely" " (default: %(default)s)"),
+ default=False,
+ action="store_true",
+ )
+ parser.add_argument(
+ "--cloud-utils",
+ dest="cloud_utils",
+ help=("depend on cloud-utils package" " (default: %(default)s)"),
+ default=False,
+ action="store_true",
+ )
- for ent in DEBUILD_ARGS:
- parser.add_argument(ent, dest="debuild_args", action='append_const',
- const=ent, default=[],
- help=("pass through '%s' to debuild" % ent))
+ parser.add_argument(
+ "--init-system",
+ dest="init_system",
+ help=("build deb with INIT_SYSTEM=xxx" " (default: %(default)s"),
+ default=os.environ.get("INIT_SYSTEM", "systemd"),
+ )
- parser.add_argument("--sign", default=False, action='store_true',
- help="sign result. do not pass -us -uc to debuild")
+ parser.add_argument(
+ "--release",
+ dest="release",
+ help=("build with changelog referencing RELEASE"),
+ default=UNRELEASED,
+ )
- parser.add_argument("--signuser", default=False, action='store',
- help="user to sign, see man dpkg-genchanges")
+ for ent in DEBUILD_ARGS:
+ parser.add_argument(
+ ent,
+ dest="debuild_args",
+ action="append_const",
+ const=ent,
+ default=[],
+ help=("pass through '%s' to debuild" % ent),
+ )
+
+ parser.add_argument(
+ "--sign",
+ default=False,
+ action="store_true",
+ help="sign result. do not pass -us -uc to debuild",
+ )
- parser.add_argument("--packaging-branch", nargs="?", metavar="BRANCH",
- const="ubuntu/devel", type=str,
- help=(
- "Import packaging from %(metavar)s instead of"
- " using the packages/debian/* templates"
- " (default: %(const)s)"
- ))
+ parser.add_argument(
+ "--signuser",
+ default=False,
+ action="store",
+ help="user to sign, see man dpkg-genchanges",
+ )
+
+ parser.add_argument(
+ "--packaging-branch",
+ nargs="?",
+ metavar="BRANCH",
+ const="ubuntu/devel",
+ type=str,
+ help=(
+ "Import packaging from %(metavar)s instead of"
+ " using the packages/debian/* templates"
+ " (default: %(const)s)"
+ ),
+ )
return parser
@@ -225,35 +276,36 @@ def main():
return 1
if not args.sign:
- args.debuild_args.extend(['-us', '-uc'])
+ args.debuild_args.extend(["-us", "-uc"])
if args.signuser:
- args.debuild_args.extend(['-e%s' % args.signuser])
+ args.debuild_args.extend(["-e%s" % args.signuser])
- os.environ['INIT_SYSTEM'] = args.init_system
+ os.environ["INIT_SYSTEM"] = args.init_system
capture = True
if args.verbose:
capture = False
templ_data = {
- 'debian_release': args.release,
- 'release_suffix': get_release_suffix(args.release)}
+ "debian_release": args.release,
+ "release_suffix": get_release_suffix(args.release),
+ }
with temp_utils.tempdir() as tdir:
# output like 0.7.6-1022-g36e92d3
ver_data = read_version()
- if ver_data['is_release_branch_ci']:
+ if ver_data["is_release_branch_ci"]:
# If we're performing CI for a new release branch, we don't yet
# have the tag required to generate version_long; use version
# instead.
- ver_data['version_long'] = ver_data['version']
+ ver_data["version_long"] = ver_data["version"]
# This is really only a temporary archive
# since we will extract it then add in the debian
# folder, then re-archive it for debian happiness
- tarball = "cloud-init_%s.orig.tar.gz" % ver_data['version_long']
+ tarball = "cloud-init_%s.orig.tar.gz" % ver_data["version_long"]
tarball_fp = util.abs_join(tdir, tarball)
path = None
for pd in ("./", "../", "../dl/"):
@@ -264,15 +316,20 @@ def main():
break
if path is None:
print("Creating a temp tarball using the 'make-tarball' helper")
- run_helper('make-tarball',
- ['--version', ver_data['version_long'],
- '--output=' + tarball_fp])
+ run_helper(
+ "make-tarball",
+ [
+ "--version",
+ ver_data["version_long"],
+ "--output=" + tarball_fp,
+ ],
+ )
print("Extracting temporary tarball %r" % (tarball))
- cmd = ['tar', '-xvzf', tarball_fp, '-C', tdir]
+ cmd = ["tar", "-xvzf", tarball_fp, "-C", tdir]
subp.subp(cmd, capture=capture)
- xdir = util.abs_join(tdir, "cloud-init-%s" % ver_data['version_long'])
+ xdir = util.abs_join(tdir, "cloud-init-%s" % ver_data["version_long"])
templ_data.update(ver_data)
if args.packaging_branch:
@@ -284,36 +341,37 @@ def main():
xdir, templ_data, cloud_util_deps=args.cloud_utils
)
- print("Running 'debuild %s' in %r" % (' '.join(args.debuild_args),
- xdir))
+ print(
+ "Running 'debuild %s' in %r" % (" ".join(args.debuild_args), xdir)
+ )
with util.chdir(xdir):
- cmd = ['debuild', '--preserve-envvar', 'INIT_SYSTEM']
+ cmd = ["debuild", "--preserve-envvar", "INIT_SYSTEM"]
if args.debuild_args:
cmd.extend(args.debuild_args)
subp.subp(cmd, capture=capture)
- link_fn = os.path.join(os.getcwd(), 'cloud-init_all.deb')
- link_dsc = os.path.join(os.getcwd(), 'cloud-init.dsc')
+ link_fn = os.path.join(os.getcwd(), "cloud-init_all.deb")
+ link_dsc = os.path.join(os.getcwd(), "cloud-init.dsc")
for base_fn in os.listdir(os.path.join(tdir)):
full_fn = os.path.join(tdir, base_fn)
if not os.path.isfile(full_fn):
continue
shutil.move(full_fn, base_fn)
print("Wrote %r" % (base_fn))
- if base_fn.endswith('_all.deb'):
+ if base_fn.endswith("_all.deb"):
# Add in the local link
util.del_file(link_fn)
os.symlink(base_fn, link_fn)
- print("Linked %r to %r" % (base_fn,
- os.path.basename(link_fn)))
- if base_fn.endswith('.dsc'):
+ print("Linked %r to %r" % (base_fn, os.path.basename(link_fn)))
+ if base_fn.endswith(".dsc"):
util.del_file(link_dsc)
os.symlink(base_fn, link_dsc)
- print("Linked %r to %r" % (base_fn,
- os.path.basename(link_dsc)))
+ print(
+ "Linked %r to %r" % (base_fn, os.path.basename(link_dsc))
+ )
return 0
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
diff --git a/packages/debian/compat b/packages/debian/compat
deleted file mode 100644
index ec635144..00000000
--- a/packages/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-9
diff --git a/packages/debian/rules b/packages/debian/rules
index d138deeb..b9b8eaff 100755
--- a/packages/debian/rules
+++ b/packages/debian/rules
@@ -1,26 +1,26 @@
#!/usr/bin/make -f
+
+include /usr/share/dpkg/pkg-info.mk
+
INIT_SYSTEM ?= systemd
export PYBUILD_INSTALL_ARGS=--init-system=$(INIT_SYSTEM)
-DEB_VERSION := $(shell dpkg-parsechangelog --show-field=Version)
%:
- dh $@ --with python3,systemd --buildsystem pybuild
-
-override_dh_install:
- dh_install
- install -d debian/cloud-init/etc/rsyslog.d
- install -d debian/cloud-init/usr/share/apport/package-hooks
- cp tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf
- install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh
- install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh
- flist=$$(find $(CURDIR)/debian/ -type f -name version.py) && sed -i 's,@@PACKAGED_VERSION@@,$(DEB_VERSION),' $${flist:-did-not-find-version-py-for-replacement}
+ dh $@ --with python3 --buildsystem pybuild
override_dh_auto_test:
ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS)))
- http_proxy= make check
+ http_proxy= make PYVER=python3 check
else
@echo check disabled by DEB_BUILD_OPTIONS=$(DEB_BUILD_OPTIONS)
endif
-override_dh_systemd_start:
- dh_systemd_start --no-restart-on-upgrade --no-start
+override_dh_installsystemd:
+ dh_installsystemd --no-restart-on-upgrade --no-start
+
+override_dh_auto_install:
+ dh_auto_install --destdir=debian/cloud-init
+ install -D -m 0644 ./tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf
+ install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh
+ install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh
+ flist=$$(find $(CURDIR)/debian/ -type f -name version.py) && sed -i 's,@@PACKAGED_VERSION@@,$(DEB_VERSION),' $${flist:-did-not-find-version-py-for-replacement}
diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in
index 5cbf828a..97e95096 100644
--- a/packages/redhat/cloud-init.spec.in
+++ b/packages/redhat/cloud-init.spec.in
@@ -179,9 +179,6 @@ fi
%attr(0755, root, root) %{_initddir}/cloud-init
%endif
-%{_sysconfdir}/NetworkManager/dispatcher.d/hook-network-manager
-%{_sysconfdir}/dhcp/dhclient-exit-hooks.d/hook-dhclient
-
# Program binaries
%{_bindir}/cloud-init*
%{_bindir}/cloud-id*
diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in
index 2586f248..62a9129b 100644
--- a/packages/suse/cloud-init.spec.in
+++ b/packages/suse/cloud-init.spec.in
@@ -126,8 +126,6 @@ version_pys=$(cd "%{buildroot}" && find . -name version.py -type f)
# Bash completion script
%{_datadir}/bash-completion/completions/cloud-init
-%{_sysconfdir}/dhcp/dhclient-exit-hooks.d/hook-dhclient
-%{_sysconfdir}/NetworkManager/dispatcher.d/hook-network-manager
%{_sysconfdir}/systemd/system/sshd-keygen@.service.d/disable-sshd-keygen-if-cloud-init-active.conf
# Python code is here...
diff --git a/setup.py b/setup.py
index 04aae5b2..a6dbc5c2 100644
--- a/setup.py
+++ b/setup.py
@@ -316,11 +316,6 @@ if not platform.system().endswith("BSD"):
data_files.extend(
[
- (
- ETC + "/NetworkManager/dispatcher.d/",
- ["tools/hook-network-manager"],
- ),
- (ETC + "/dhcp/dhclient-exit-hooks.d/", ["tools/hook-dhclient"]),
(RULES_PATH + "/udev/rules.d", [f for f in glob("udev/*.rules")]),
(
ETC + "/systemd/system/sshd-keygen@.service.d/",
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index 308ffedd..945a5fb6 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -402,7 +402,7 @@ class OpenstackCloud(IntegrationCloud):
try:
UUID(image.image_id)
except ValueError as e:
- raise Exception(
+ raise RuntimeError(
"When using Openstack, `OS_IMAGE` MUST be specified with "
"a 36-character UUID image ID. Passing in a release name is "
"not valid here.\n"
diff --git a/tests/integration_tests/cmd/test_schema.py b/tests/integration_tests/cmd/test_schema.py
index 0930309b..fd8c13cf 100644
--- a/tests/integration_tests/cmd/test_schema.py
+++ b/tests/integration_tests/cmd/test_schema.py
@@ -19,7 +19,7 @@ class TestSchemaDeprecations:
def test_clean_log(self, class_client: IntegrationInstance):
log = class_client.read_from_file("/var/log/cloud-init.log")
verify_clean_log(log, ignore_deprecations=True)
- assert "WARNING]: Deprecated cloud-config provided:" in log
+ assert "DEPRECATED]: Deprecated cloud-config provided:" in log
assert "apt_reboot_if_required: Default: ``false``. Deprecated " in log
assert "apt_update: Default: ``false``. Deprecated in version" in log
assert "apt_upgrade: Default: ``false``. Deprecated in version" in log
diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
index 782ca7e5..fabeb608 100644
--- a/tests/integration_tests/conftest.py
+++ b/tests/integration_tests/conftest.py
@@ -65,7 +65,7 @@ def pytest_runtest_setup(item):
unsupported_message = "Cannot run on platform {}".format(current_platform)
if "no_container" in test_marks:
if "lxd_container" in test_marks:
- raise Exception(
+ raise RuntimeError(
"lxd_container and no_container marks simultaneously set "
"on test"
)
diff --git a/tests/integration_tests/datasources/test_detect_openstack.py b/tests/integration_tests/datasources/test_detect_openstack.py
new file mode 100644
index 00000000..c70e9815
--- /dev/null
+++ b/tests/integration_tests/datasources/test_detect_openstack.py
@@ -0,0 +1,43 @@
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+
+@pytest.mark.lxd_vm
+@pytest.mark.lxd_use_exec
+def test_lxd_datasource_kernel_override(client: IntegrationInstance):
+ """This test is twofold: it tests kernel commandline override, which also
+ validates OpenStack Ironic requirements. OpenStack Ironic does not
+ advertise itself to cloud-init via any of the conventional methods: DMI,
+ etc.
+
+ On systemd, ds-identify is able to grok kernel commandline, however to
+ support cloud-init kernel command line parsing on non-systemd, parsing
+ kernel commandline in Python code is required.
+
+ This test runs on LXD, but forces cloud-init to attempt to run OpenStack.
+ This will inevitably fail on LXD, but we only care that it tried - on
+ Ironic it will succeed.
+
+ Configure grub's kernel command line to tell cloud-init to use OpenStack
+ - even though LXD should naturally be detected.
+ """
+ client.execute(
+ "sed --in-place "
+ '\'s/^.*GRUB_CMDLINE_LINUX=.*$/GRUB_CMDLINE_LINUX="ci.ds=OpenStack"/g'
+ "' /etc/default/grub"
+ )
+
+ # We should probably include non-systemd distros at some point. This should
+ # most likely be as simple as updating the output path for grub-mkconfig
+ client.execute("grub-mkconfig -o /boot/efi/EFI/ubuntu/grub.cfg")
+ client.execute("cloud-init clean --logs")
+ client.instance.shutdown()
+ client.instance.execute_via_ssh = False
+ client.instance.start()
+ client.execute("cloud-init status --wait")
+ log = client.execute("cat /var/log/cloud-init.log")
+ assert (
+ "Machine is configured by the kernel commandline to run on single "
+ "datasource DataSourceOpenStackLocal"
+ ) in log
diff --git a/tests/integration_tests/datasources/test_oci_networking.py b/tests/integration_tests/datasources/test_oci_networking.py
index f569650e..dc0d343b 100644
--- a/tests/integration_tests/datasources/test_oci_networking.py
+++ b/tests/integration_tests/datasources/test_oci_networking.py
@@ -116,3 +116,42 @@ def test_oci_networking_iscsi_instance_secondary_vnics(
)
assert len(expected_interfaces) + 1 == len(configured_interfaces)
assert client.execute("ping -c 2 canonical.com").ok
+
+
+SYSTEM_CFG = """\
+network:
+ ethernets:
+ id0:
+ dhcp4: true
+ dhcp6: true
+ match:
+ name: "ens*"
+ version: 2
+"""
+
+
+def customize_netcfg(
+ client: IntegrationInstance,
+ tmpdir,
+):
+ cfg = tmpdir.join("net.cfg")
+ with open(cfg, "w") as f:
+ f.write(SYSTEM_CFG)
+ client.push_file(cfg, "/etc/cloud/cloud.cfg.d/50-network-test.cfg")
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+
+@pytest.mark.oci
+def test_oci_networking_system_cfg(client: IntegrationInstance, tmpdir):
+ customize_netcfg(client, tmpdir)
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+
+ assert (
+ "Applying network configuration from system_cfg" in log
+ ), "network source used wasn't system_cfg"
+ netplan_yaml = client.read_from_file("/etc/netplan/50-cloud-init.yaml")
+ netplan_cfg = yaml.safe_load(netplan_yaml)
+ expected_netplan_cfg = yaml.safe_load(SYSTEM_CFG)
+ assert expected_netplan_cfg == netplan_cfg
diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py
index cf2bf4cc..7bdf05d0 100644
--- a/tests/integration_tests/instances.py
+++ b/tests/integration_tests/instances.py
@@ -75,7 +75,7 @@ class IntegrationInstance:
def execute(self, command, *, use_sudo=True) -> Result:
if self.instance.username == "root" and use_sudo is False:
- raise Exception("Root user cannot run unprivileged")
+ raise RuntimeError("Root user cannot run unprivileged")
return self.instance.execute(command, use_sudo=use_sudo)
def pull_file(self, remote_path, local_path):
@@ -139,7 +139,7 @@ class IntegrationInstance:
elif source == CloudInitSource.UPGRADE:
self.upgrade_cloud_init()
else:
- raise Exception(
+ raise RuntimeError(
"Specified to install {} which isn't supported here".format(
source
)
diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py
index 647e8728..8481b454 100644
--- a/tests/integration_tests/modules/test_combined.py
+++ b/tests/integration_tests/modules/test_combined.py
@@ -29,6 +29,10 @@ from tests.integration_tests.util import (
USER_DATA = """\
#cloud-config
+users:
+- default
+- name: craig
+ sudo: false # make sure craig doesn't get elevated perms
apt:
primary:
- arches: [default]
@@ -56,7 +60,7 @@ rsyslog:
content: |
module(load="imtcp")
input(type="imtcp" port="514")
- $template RemoteLogs,"/var/tmp/rsyslog.log"
+ $template RemoteLogs,"/var/spool/rsyslog/cloudinit.log"
*.* ?RemoteLogs
& ~
remotes:
@@ -113,6 +117,14 @@ class TestCombined:
assert re.search(expected, log)
+ def test_deprecated_message(self, class_client: IntegrationInstance):
+ """Check that deprecated key produces a log warning"""
+ client = class_client
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "Deprecated cloud-config provided" in log
+ assert "The value of 'false' in user craig's 'sudo' config is " in log
+ assert 2 == log.count("DEPRECATE")
+
def test_ntp_with_apt(self, class_client: IntegrationInstance):
"""LP #1628337.
@@ -163,7 +175,9 @@ class TestCombined:
def test_rsyslog(self, class_client: IntegrationInstance):
"""Test rsyslog is configured correctly."""
client = class_client
- assert "My test log" in client.read_from_file("/var/tmp/rsyslog.log")
+ assert "My test log" in client.read_from_file(
+ "/var/spool/rsyslog/cloudinit.log"
+ )
def test_runcmd(self, class_client: IntegrationInstance):
"""Test runcmd works as expected"""
diff --git a/tests/unittests/cmd/test_main.py b/tests/unittests/cmd/test_main.py
index e9ad0bb8..903febd7 100644
--- a/tests/unittests/cmd/test_main.py
+++ b/tests/unittests/cmd/test_main.py
@@ -112,7 +112,7 @@ class TestMain(FilesystemMockingTestCase):
subcommand="init",
)
- def set_hostname(name, cfg, cloud, log, args):
+ def set_hostname(name, cfg, cloud, args):
self.assertEqual("set-hostname", name)
updated_cfg = copy.deepcopy(self.cfg)
updated_cfg.update(
@@ -132,7 +132,6 @@ class TestMain(FilesystemMockingTestCase):
updated_cfg.pop("system_info")
self.assertEqual(updated_cfg, cfg)
- self.assertEqual(main.LOG, log)
self.assertIsNone(args)
(_item1, item2) = wrap_and_call(
diff --git a/tests/unittests/config/test_apt_configure_sources_list_v1.py b/tests/unittests/config/test_apt_configure_sources_list_v1.py
index 52964e10..fb630719 100644
--- a/tests/unittests/config/test_apt_configure_sources_list_v1.py
+++ b/tests/unittests/config/test_apt_configure_sources_list_v1.py
@@ -3,7 +3,6 @@
""" test_handler_apt_configure_sources_list
Test templating of sources list
"""
-import logging
import os
import shutil
import tempfile
@@ -15,8 +14,6 @@ from cloudinit.distros.debian import Distro
from tests.unittests import helpers as t_help
from tests.unittests.util import get_cloud
-LOG = logging.getLogger(__name__)
-
YAML_TEXT_CUSTOM_SL = """
apt_mirror: http://archive.ubuntu.com/ubuntu/
apt_custom_sources_list: |
@@ -97,9 +94,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
templater, "render_string", return_value="fake"
) as mockrnd:
with mock.patch.object(util, "rename"):
- cc_apt_configure.handle(
- "test", cfg, mycloud, LOG, None
- )
+ cc_apt_configure.handle("test", cfg, mycloud, None)
mockisfile.assert_any_call(
"/etc/cloud/templates/sources.list.%s.tmpl" % distro
@@ -135,7 +130,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
@staticmethod
def myresolve(name):
"""Fake util.is_resolvable for mirrorfail tests"""
- if name == "does.not.exist":
+ if "does.not.exist" in name:
print("Faking FAIL for '%s'" % name)
return False
else:
@@ -155,8 +150,8 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
],
"http://httpredir.debian.org/debian",
)
- mockresolve.assert_any_call("does.not.exist")
- mockresolve.assert_any_call("httpredir.debian.org")
+ mockresolve.assert_any_call("http://does.not.exist")
+ mockresolve.assert_any_call("http://httpredir.debian.org/debian")
def test_apt_v1_srcl_ubuntu_mirrorfail(self):
"""Test rendering of a source.list from template for ubuntu"""
@@ -168,8 +163,8 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
["http://does.not.exist", "http://archive.ubuntu.com/ubuntu/"],
"http://archive.ubuntu.com/ubuntu/",
)
- mockresolve.assert_any_call("does.not.exist")
- mockresolve.assert_any_call("archive.ubuntu.com")
+ mockresolve.assert_any_call("http://does.not.exist")
+ mockresolve.assert_any_call("http://archive.ubuntu.com/ubuntu/")
def test_apt_v1_srcl_custom(self):
"""Test rendering from a custom source.list template"""
@@ -182,9 +177,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
with mock.patch.object(
Distro, "get_primary_arch", return_value="amd64"
):
- cc_apt_configure.handle(
- "notimportant", cfg, mycloud, LOG, None
- )
+ cc_apt_configure.handle("notimportant", cfg, mycloud, None)
mockwrite.assert_called_once_with(
"/etc/apt/sources.list", EXPECTED_CONVERTED_CONTENT, mode=420
diff --git a/tests/unittests/config/test_apt_configure_sources_list_v3.py b/tests/unittests/config/test_apt_configure_sources_list_v3.py
index d9ec6f74..82ae5547 100644
--- a/tests/unittests/config/test_apt_configure_sources_list_v3.py
+++ b/tests/unittests/config/test_apt_configure_sources_list_v3.py
@@ -3,7 +3,6 @@
""" test_apt_custom_sources_list
Test templating of custom sources list
"""
-import logging
import os
import shutil
import tempfile
@@ -17,8 +16,6 @@ from cloudinit.distros.debian import Distro
from tests.unittests import helpers as t_help
from tests.unittests.util import get_cloud
-LOG = logging.getLogger(__name__)
-
TARGET = "/"
# Input and expected output for the custom template
@@ -129,7 +126,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
mock_shouldcfg = stack.enter_context(
mock.patch(cfg_func, return_value=(cfg_on_empty, "test"))
)
- cc_apt_configure.handle("test", cfg, mycloud, LOG, None)
+ cc_apt_configure.handle("test", cfg, mycloud, None)
return mock_writefile, mock_loadfile, mock_isfile, mock_shouldcfg
@@ -185,7 +182,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
with mock.patch.object(
util, "system_is_snappy", return_value=True
) as mock_issnappy:
- cc_apt_configure.handle("test", cfg, mycloud, LOG, None)
+ cc_apt_configure.handle("test", cfg, mycloud, None)
self.assertEqual(0, mock_writefile.call_count)
self.assertEqual(1, mock_issnappy.call_count)
@@ -233,9 +230,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
with mock.patch.object(
Distro, "get_primary_arch", return_value="amd64"
):
- cc_apt_configure.handle(
- "notimportant", cfg, mycloud, LOG, None
- )
+ cc_apt_configure.handle("notimportant", cfg, mycloud, None)
calls = [
call(
diff --git a/tests/unittests/config/test_apt_source_v1.py b/tests/unittests/config/test_apt_source_v1.py
index f107f964..93c6ed6a 100644
--- a/tests/unittests/config/test_apt_source_v1.py
+++ b/tests/unittests/config/test_apt_source_v1.py
@@ -111,7 +111,7 @@ class TestAptSourceConfig(TestCase):
"""
cfg = self.wrapv1conf(cfg)
- cc_apt_configure.handle("test", cfg, self.cloud, None, None)
+ cc_apt_configure.handle("test", cfg, self.cloud, None)
self.assertTrue(os.path.isfile(filename))
@@ -266,7 +266,7 @@ class TestAptSourceConfig(TestCase):
"""
cfg = self.wrapv1conf(cfg)
params = self._get_default_params()
- cc_apt_configure.handle("test", cfg, self.cloud, None, None)
+ cc_apt_configure.handle("test", cfg, self.cloud, None)
self.assertTrue(os.path.isfile(filename))
@@ -357,7 +357,7 @@ class TestAptSourceConfig(TestCase):
cfg = self.wrapv1conf(cfg)
with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj:
- cc_apt_configure.handle("test", cfg, self.cloud, None, None)
+ cc_apt_configure.handle("test", cfg, self.cloud, None)
# check if it added the right number of keys
calls = []
@@ -483,7 +483,7 @@ class TestAptSourceConfig(TestCase):
cfg = self.wrapv1conf([cfg])
with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj:
- cc_apt_configure.handle("test", cfg, self.cloud, None, None)
+ cc_apt_configure.handle("test", cfg, self.cloud, None)
# check if it added the right amount of keys
sources = cfg["apt"]["sources"]
@@ -544,7 +544,7 @@ class TestAptSourceConfig(TestCase):
cfg = {"key": "fakekey 4242", "filename": self.aptlistfile}
cfg = self.wrapv1conf([cfg])
with mock.patch.object(cc_apt_configure, "apt_key") as mockobj:
- cc_apt_configure.handle("test", cfg, self.cloud, None, None)
+ cc_apt_configure.handle("test", cfg, self.cloud, None)
calls = (
call(
@@ -568,7 +568,7 @@ class TestAptSourceConfig(TestCase):
subp, "subp", return_value=("fakekey 1212", "")
):
with mock.patch.object(cc_apt_configure, "apt_key") as mockobj:
- cc_apt_configure.handle("test", cfg, self.cloud, None, None)
+ cc_apt_configure.handle("test", cfg, self.cloud, None)
calls = (
call(
@@ -597,7 +597,7 @@ class TestAptSourceConfig(TestCase):
with mock.patch.object(
gpg, "getkeybyid", return_value=expectedkey
) as mockgetkey:
- cc_apt_configure.handle("test", cfg, self.cloud, None, None)
+ cc_apt_configure.handle("test", cfg, self.cloud, None)
if is_hardened is not None:
mockkey.assert_called_with(
expectedkey, self.aptlistfile, hardened=is_hardened
@@ -643,7 +643,7 @@ class TestAptSourceConfig(TestCase):
cfg = self.wrapv1conf([cfg])
with mock.patch.object(subp, "subp") as mockobj:
- cc_apt_configure.handle("test", cfg, self.cloud, None, None)
+ cc_apt_configure.handle("test", cfg, self.cloud, None)
mockobj.assert_called_once_with(
[
"add-apt-repository",
@@ -673,7 +673,7 @@ class TestAptSourceConfig(TestCase):
cfg = self.wrapv1conf([cfg1, cfg2, cfg3])
with mock.patch.object(subp, "subp") as mockobj:
- cc_apt_configure.handle("test", cfg, self.cloud, None, None)
+ cc_apt_configure.handle("test", cfg, self.cloud, None)
calls = [
call(
[
diff --git a/tests/unittests/config/test_apt_source_v3.py b/tests/unittests/config/test_apt_source_v3.py
index 8d7ba5dc..1813000e 100644
--- a/tests/unittests/config/test_apt_source_v3.py
+++ b/tests/unittests/config/test_apt_source_v3.py
@@ -963,11 +963,11 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
with mock.patch.object(util, "is_resolvable") as mockresolve:
util.is_resolvable_url("http://1.2.3.4/ubuntu")
- mockresolve.assert_called_with("1.2.3.4")
+ mockresolve.assert_called_with("http://1.2.3.4/ubuntu")
with mock.patch.object(util, "is_resolvable") as mockresolve:
util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu")
- mockresolve.assert_called_with("us.archive.ubuntu.com")
+ mockresolve.assert_called_with("http://us.archive.ubuntu.com/ubuntu")
# former tests can leave this set (or not if the test is ran directly)
# do a hard reset to ensure a stable result
@@ -984,7 +984,6 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
)
mocksock.assert_any_call("example.invalid.", None, 0, 0, 1, 2)
mocksock.assert_any_call("us.archive.ubuntu.com", None)
- mocksock.assert_any_call("1.2.3.4", None)
self.assertTrue(ret)
self.assertTrue(ret2)
diff --git a/tests/unittests/config/test_cc_ansible.py b/tests/unittests/config/test_cc_ansible.py
index bd8ec9bf..b009054f 100644
--- a/tests/unittests/config/test_cc_ansible.py
+++ b/tests/unittests/config/test_cc_ansible.py
@@ -301,12 +301,12 @@ class TestAnsible:
mocker.patch.dict(M_PATH + "os.environ", clear=True)
if exception:
with raises(exception):
- cc_ansible.handle("", cfg, get_cloud(), None, None)
+ cc_ansible.handle("", cfg, get_cloud(), None)
else:
cloud = get_cloud(mocked_distro=True)
cloud.distro.pip_package_name = "python3-pip"
install = cfg["ansible"]["install_method"]
- cc_ansible.handle("", cfg, cloud, None, None)
+ cc_ansible.handle("", cfg, cloud, None)
if install == "distro":
cloud.distro.install_packages.assert_called_once()
cloud.distro.install_packages.assert_called_with(
@@ -404,7 +404,7 @@ class TestAnsible:
@mock.patch(M_PATH + "validate_config")
def test_do_not_run(self, m_validate):
"""verify that if ansible key not included, don't do anything"""
- cc_ansible.handle("", {}, get_cloud(), None, None) # pyright: ignore
+ cc_ansible.handle("", {}, get_cloud(), None) # pyright: ignore
assert not m_validate.called
@mock.patch(
@@ -429,7 +429,7 @@ class TestAnsible:
@mock.patch(M_PATH + "subp", return_value=("stdout", "stderr"))
@mock.patch(M_PATH + "which", return_value=True)
def test_ansible_env_var(self, m_which, m_subp):
- cc_ansible.handle("", CFG_FULL_PULL, get_cloud(), mock.Mock(), [])
+ cc_ansible.handle("", CFG_FULL_PULL, get_cloud(), [])
# python 3.8 required for Mock.call_args.kwargs dict attribute
if isinstance(m_subp.call_args.kwargs, dict):
diff --git a/tests/unittests/config/test_cc_apk_configure.py b/tests/unittests/config/test_cc_apk_configure.py
index 85dd028f..273f7e83 100644
--- a/tests/unittests/config/test_cc_apk_configure.py
+++ b/tests/unittests/config/test_cc_apk_configure.py
@@ -4,7 +4,6 @@
Test creation of repositories file
"""
-import logging
import os
import re
import textwrap
@@ -35,7 +34,6 @@ class TestNoConfig(FilesystemMockingTestCase):
self.add_patch(CC_APK + "._write_repositories_file", "m_write_repos")
self.name = "apk-configure"
self.cloud_init = None
- self.log = logging.getLogger("TestNoConfig")
self.args = []
def test_no_config(self):
@@ -45,9 +43,7 @@ class TestNoConfig(FilesystemMockingTestCase):
"""
config = util.get_builtin_cfg()
- cc_apk_configure.handle(
- self.name, config, self.cloud_init, self.log, self.args
- )
+ cc_apk_configure.handle(self.name, config, self.cloud_init, self.args)
self.assertEqual(0, self.m_write_repos.call_count)
@@ -62,7 +58,6 @@ class TestConfig(FilesystemMockingTestCase):
self.paths = helpers.Paths({"templates_dir": self.new_root})
self.name = "apk-configure"
self.cloud = cloud.Cloud(None, self.paths, None, None, None)
- self.log = logging.getLogger("TestNoConfig")
self.args = []
@mock.patch(CC_APK + "._write_repositories_file")
@@ -73,9 +68,7 @@ class TestConfig(FilesystemMockingTestCase):
"""
config = {"apk_repos": {}}
- cc_apk_configure.handle(
- self.name, config, self.cloud, self.log, self.args
- )
+ cc_apk_configure.handle(self.name, config, self.cloud, self.args)
self.assertEqual(0, m_write_repos.call_count)
@@ -86,9 +79,7 @@ class TestConfig(FilesystemMockingTestCase):
"""
config = {"apk_repos": {"alpine_repo": []}}
- cc_apk_configure.handle(
- self.name, config, self.cloud, self.log, self.args
- )
+ cc_apk_configure.handle(self.name, config, self.cloud, self.args)
self.assertEqual(0, m_write_repos.call_count)
@@ -99,9 +90,7 @@ class TestConfig(FilesystemMockingTestCase):
alpine_version = "v3.12"
config = {"apk_repos": {"alpine_repo": {"version": alpine_version}}}
- cc_apk_configure.handle(
- self.name, config, self.cloud, self.log, self.args
- )
+ cc_apk_configure.handle(self.name, config, self.cloud, self.args)
expected_content = textwrap.dedent(
"""\
@@ -135,9 +124,7 @@ class TestConfig(FilesystemMockingTestCase):
}
}
- cc_apk_configure.handle(
- self.name, config, self.cloud, self.log, self.args
- )
+ cc_apk_configure.handle(self.name, config, self.cloud, self.args)
expected_content = textwrap.dedent(
"""\
@@ -173,9 +160,7 @@ class TestConfig(FilesystemMockingTestCase):
}
}
- cc_apk_configure.handle(
- self.name, config, self.cloud, self.log, self.args
- )
+ cc_apk_configure.handle(self.name, config, self.cloud, self.args)
expected_content = textwrap.dedent(
"""\
@@ -215,9 +200,7 @@ class TestConfig(FilesystemMockingTestCase):
}
}
- cc_apk_configure.handle(
- self.name, config, self.cloud, self.log, self.args
- )
+ cc_apk_configure.handle(self.name, config, self.cloud, self.args)
expected_content = textwrap.dedent(
"""\
@@ -256,9 +239,7 @@ class TestConfig(FilesystemMockingTestCase):
}
}
- cc_apk_configure.handle(
- self.name, config, self.cloud, self.log, self.args
- )
+ cc_apk_configure.handle(self.name, config, self.cloud, self.args)
expected_content = textwrap.dedent(
"""\
@@ -305,9 +286,7 @@ class TestConfig(FilesystemMockingTestCase):
}
}
- cc_apk_configure.handle(
- self.name, config, self.cloud, self.log, self.args
- )
+ cc_apk_configure.handle(self.name, config, self.cloud, self.args)
expected_content = textwrap.dedent(
"""\
diff --git a/tests/unittests/config/test_cc_apt_pipelining.py b/tests/unittests/config/test_cc_apt_pipelining.py
index 0f72d32b..332be28c 100644
--- a/tests/unittests/config/test_cc_apt_pipelining.py
+++ b/tests/unittests/config/test_cc_apt_pipelining.py
@@ -17,14 +17,14 @@ class TestAptPipelining:
@mock.patch("cloudinit.config.cc_apt_pipelining.util.write_file")
def test_not_disabled_by_default(self, m_write_file):
"""ensure that default behaviour is to not disable pipelining"""
- cc_apt_pipelining.handle("foo", {}, None, mock.MagicMock(), None)
+ cc_apt_pipelining.handle("foo", {}, None, None)
assert 0 == m_write_file.call_count
@mock.patch("cloudinit.config.cc_apt_pipelining.util.write_file")
def test_false_disables_pipelining(self, m_write_file):
"""ensure that pipelining can be disabled with correct config"""
cc_apt_pipelining.handle(
- "foo", {"apt_pipelining": "false"}, None, mock.MagicMock(), None
+ "foo", {"apt_pipelining": "false"}, None, None
)
assert 1 == m_write_file.call_count
args, _ = m_write_file.call_args
diff --git a/tests/unittests/config/test_cc_bootcmd.py b/tests/unittests/config/test_cc_bootcmd.py
index 9831d25e..ee84f8df 100644
--- a/tests/unittests/config/test_cc_bootcmd.py
+++ b/tests/unittests/config/test_cc_bootcmd.py
@@ -1,5 +1,4 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import logging
import re
import tempfile
@@ -15,8 +14,6 @@ from cloudinit.config.schema import (
from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
from tests.unittests.util import get_cloud
-LOG = logging.getLogger(__name__)
-
class FakeExtendedTempFile:
def __init__(self, suffix):
@@ -50,7 +47,7 @@ class TestBootcmd(CiTestCase):
"""When the provided config doesn't contain bootcmd, skip it."""
cfg = {}
mycloud = get_cloud()
- handle("notimportant", cfg, mycloud, LOG, None)
+ handle("notimportant", cfg, mycloud, None)
self.assertIn(
"Skipping module named notimportant, no 'bootcmd' key",
self.logs.getvalue(),
@@ -61,7 +58,7 @@ class TestBootcmd(CiTestCase):
invalid_config = {"bootcmd": 1}
cc = get_cloud()
with self.assertRaises(TypeError) as context_manager:
- handle("cc_bootcmd", invalid_config, cc, LOG, [])
+ handle("cc_bootcmd", invalid_config, cc, [])
self.assertIn("Failed to shellify bootcmd", self.logs.getvalue())
self.assertEqual(
"Input to shellify was type 'int'. Expected list or tuple.",
@@ -73,7 +70,7 @@ class TestBootcmd(CiTestCase):
}
cc = get_cloud()
with self.assertRaises(TypeError) as context_manager:
- handle("cc_bootcmd", invalid_config, cc, LOG, [])
+ handle("cc_bootcmd", invalid_config, cc, [])
logs = self.logs.getvalue()
self.assertIn("Failed to shellify", logs)
self.assertEqual(
@@ -93,7 +90,7 @@ class TestBootcmd(CiTestCase):
with mock.patch(self._etmpfile_path, FakeExtendedTempFile):
with self.allow_subp(["/bin/sh"]):
- handle("cc_bootcmd", valid_config, cc, LOG, [])
+ handle("cc_bootcmd", valid_config, cc, [])
self.assertEqual(
my_id + " iid-datasource-none\n", util.load_file(out_file)
)
@@ -106,7 +103,7 @@ class TestBootcmd(CiTestCase):
with mock.patch(self._etmpfile_path, FakeExtendedTempFile):
with self.allow_subp(["/bin/sh"]):
with self.assertRaises(subp.ProcessExecutionError) as ctxt:
- handle("does-not-matter", valid_config, cc, LOG, [])
+ handle("does-not-matter", valid_config, cc, [])
self.assertIn(
"Unexpected error while running command.\nCommand: ['/bin/sh',",
str(ctxt.exception),
diff --git a/tests/unittests/config/test_cc_ca_certs.py b/tests/unittests/config/test_cc_ca_certs.py
index 19e5d422..c40f1e83 100644
--- a/tests/unittests/config/test_cc_ca_certs.py
+++ b/tests/unittests/config/test_cc_ca_certs.py
@@ -1,5 +1,4 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import logging
import re
import shutil
import tempfile
@@ -9,7 +8,9 @@ from unittest import mock
import pytest
-from cloudinit import distros, helpers, subp, util
+from cloudinit import distros, helpers
+from cloudinit import log as logger
+from cloudinit import subp, util
from cloudinit.config import cc_ca_certs
from cloudinit.config.schema import (
SchemaValidationError,
@@ -25,7 +26,6 @@ class TestNoConfig(unittest.TestCase):
super(TestNoConfig, self).setUp()
self.name = "ca-certs"
self.cloud_init = None
- self.log = logging.getLogger("TestNoConfig")
self.args = []
def test_no_config(self):
@@ -41,9 +41,7 @@ class TestNoConfig(unittest.TestCase):
mock.patch.object(cc_ca_certs, "update_ca_certs")
)
- cc_ca_certs.handle(
- self.name, config, self.cloud_init, self.log, self.args
- )
+ cc_ca_certs.handle(self.name, config, self.cloud_init, self.args)
self.assertEqual(util_mock.call_count, 0)
self.assertEqual(certs_mock.call_count, 0)
@@ -54,7 +52,6 @@ class TestConfig(TestCase):
super(TestConfig, self).setUp()
self.name = "ca-certs"
self.paths = None
- self.log = logging.getLogger("TestNoConfig")
self.args = []
def _fetch_distro(self, kind):
@@ -91,7 +88,7 @@ class TestConfig(TestCase):
for distro_name in cc_ca_certs.distros:
self._mock_init()
cloud = get_cloud(distro_name)
- cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+ cc_ca_certs.handle(self.name, config, cloud, self.args)
self.assertEqual(self.mock_add.call_count, 0)
self.assertEqual(self.mock_update.call_count, 1)
@@ -108,7 +105,7 @@ class TestConfig(TestCase):
for distro_name in cc_ca_certs.distros:
self._mock_init()
cloud = get_cloud(distro_name)
- cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+ cc_ca_certs.handle(self.name, config, cloud, self.args)
self.assertEqual(self.mock_add.call_count, 0)
self.assertEqual(self.mock_update.call_count, 1)
@@ -126,7 +123,7 @@ class TestConfig(TestCase):
self._mock_init()
cloud = get_cloud(distro_name)
conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
- cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+ cc_ca_certs.handle(self.name, config, cloud, self.args)
self.mock_add.assert_called_once_with(conf, ["CERT1"])
self.assertEqual(self.mock_update.call_count, 1)
@@ -144,7 +141,7 @@ class TestConfig(TestCase):
self._mock_init()
cloud = get_cloud(distro_name)
conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
- cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+ cc_ca_certs.handle(self.name, config, cloud, self.args)
self.mock_add.assert_called_once_with(conf, ["CERT1", "CERT2"])
self.assertEqual(self.mock_update.call_count, 1)
@@ -161,7 +158,7 @@ class TestConfig(TestCase):
for distro_name in cc_ca_certs.distros:
self._mock_init()
cloud = get_cloud(distro_name)
- cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+ cc_ca_certs.handle(self.name, config, cloud, self.args)
self.assertEqual(self.mock_add.call_count, 0)
self.assertEqual(self.mock_update.call_count, 1)
@@ -178,7 +175,7 @@ class TestConfig(TestCase):
for distro_name in cc_ca_certs.distros:
self._mock_init()
cloud = get_cloud(distro_name)
- cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+ cc_ca_certs.handle(self.name, config, cloud, self.args)
self.assertEqual(self.mock_add.call_count, 0)
self.assertEqual(self.mock_update.call_count, 1)
@@ -198,7 +195,7 @@ class TestConfig(TestCase):
self._mock_init()
cloud = get_cloud(distro_name)
conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
- cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+ cc_ca_certs.handle(self.name, config, cloud, self.args)
self.assertEqual(self.mock_remove.call_count, 1)
self.mock_add.assert_called_once_with(conf, ["CERT1"])
@@ -311,6 +308,7 @@ class TestRemoveDefaultCaCerts(TestCase):
"cloud_dir": tmpdir,
}
)
+ self.add_patch("cloudinit.config.cc_ca_certs.os.stat", "m_stat")
def test_commands(self):
ca_certs_content = "# line1\nline2\nline3\n"
@@ -318,6 +316,7 @@ class TestRemoveDefaultCaCerts(TestCase):
"# line1\n# Modified by cloud-init to deselect certs due to"
" user-data\n!line2\n!line3\n"
)
+ self.m_stat.return_value.st_size = 1
for distro_name in cc_ca_certs.distros:
conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
@@ -363,6 +362,18 @@ class TestRemoveDefaultCaCerts(TestCase):
else:
assert mock_subp.call_count == 0
+ def test_non_existent_cert_cfg(self):
+ self.m_stat.return_value.st_size = 0
+
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ with ExitStack() as mocks:
+ mocks.enter_context(
+ mock.patch.object(util, "delete_dir_contents")
+ )
+ mocks.enter_context(mock.patch.object(subp, "subp"))
+ cc_ca_certs.disable_default_ca_certs(distro_name, conf)
+
class TestCACertsSchema:
"""Directly test schema rather than through handle."""
@@ -422,25 +433,23 @@ class TestCACertsSchema:
@mock.patch.object(cc_ca_certs, "update_ca_certs")
def test_deprecate_key_warnings(self, update_ca_certs, caplog):
"""Assert warnings are logged for deprecated keys."""
- log = logging.getLogger("CALogTest")
+ logger.setupLogging()
cloud = get_cloud("ubuntu")
cc_ca_certs.handle(
- "IGNORE", {"ca-certs": {"remove-defaults": False}}, cloud, log, []
+ "IGNORE", {"ca-certs": {"remove-defaults": False}}, cloud, []
)
expected_warnings = [
- "DEPRECATION: key 'ca-certs' is now deprecated. Use 'ca_certs'"
- " instead.",
- "DEPRECATION: key 'ca-certs.remove-defaults' is now deprecated."
- " Use 'ca_certs.remove_defaults' instead.",
+ "Key 'ca-certs' is deprecated in",
+ "Key 'remove-defaults' is deprecated in",
]
for warning in expected_warnings:
assert warning in caplog.text
+ assert "DEPRECAT" in caplog.text
assert 1 == update_ca_certs.call_count
@mock.patch.object(cc_ca_certs, "update_ca_certs")
def test_duplicate_keys(self, update_ca_certs, caplog):
"""Assert warnings are logged for deprecated keys."""
- log = logging.getLogger("CALogTest")
cloud = get_cloud("ubuntu")
cc_ca_certs.handle(
"IGNORE",
@@ -449,7 +458,6 @@ class TestCACertsSchema:
"ca_certs": {"remove_defaults": False},
},
cloud,
- log,
[],
)
expected_warning = (
@@ -458,6 +466,3 @@ class TestCACertsSchema:
)
assert expected_warning in caplog.text
assert 1 == update_ca_certs.call_count
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_chef.py b/tests/unittests/config/test_cc_chef.py
index 606eada4..9d8ba1f1 100644
--- a/tests/unittests/config/test_cc_chef.py
+++ b/tests/unittests/config/test_cc_chef.py
@@ -1,7 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
import json
-import logging
import os
import re
@@ -25,8 +24,6 @@ from tests.unittests.helpers import (
)
from tests.unittests.util import MockDistro, get_cloud
-LOG = logging.getLogger(__name__)
-
CLIENT_TEMPL = cloud_init_project_dir("templates/chef_client.rb.tmpl")
@@ -128,7 +125,7 @@ class TestChef(FilesystemMockingTestCase):
self.patchOS(self.tmp)
cfg = {}
- cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ cc_chef.handle("chef", cfg, get_cloud(), [])
for d in cc_chef.CHEF_DIRS:
self.assertFalse(os.path.isdir(d))
@@ -175,7 +172,7 @@ class TestChef(FilesystemMockingTestCase):
),
},
}
- cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ cc_chef.handle("chef", cfg, get_cloud(), [])
for d in cc_chef.CHEF_DIRS:
self.assertTrue(os.path.isdir(d))
c = util.load_file(cc_chef.CHEF_RB_PATH)
@@ -210,7 +207,7 @@ class TestChef(FilesystemMockingTestCase):
},
},
}
- cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ cc_chef.handle("chef", cfg, get_cloud(), [])
c = util.load_file(cc_chef.CHEF_FB_PATH)
self.assertEqual(
{
@@ -237,7 +234,7 @@ class TestChef(FilesystemMockingTestCase):
"show_time": None,
},
}
- cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ cc_chef.handle("chef", cfg, get_cloud(), [])
c = util.load_file(cc_chef.CHEF_RB_PATH)
self.assertNotIn("json_attribs", c)
self.assertNotIn("Formatter.show_time", c)
@@ -262,7 +259,7 @@ class TestChef(FilesystemMockingTestCase):
"validation_cert": v_cert,
},
}
- cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ cc_chef.handle("chef", cfg, get_cloud(), [])
content = util.load_file(cc_chef.CHEF_RB_PATH)
self.assertIn(v_path, content)
util.load_file(v_path)
@@ -287,7 +284,7 @@ class TestChef(FilesystemMockingTestCase):
}
util.write_file("/etc/cloud/templates/chef_client.rb.tmpl", tpl_file)
util.write_file(v_path, expected_cert)
- cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ cc_chef.handle("chef", cfg, get_cloud(), [])
content = util.load_file(cc_chef.CHEF_RB_PATH)
self.assertIn(v_path, content)
util.load_file(v_path)
diff --git a/tests/unittests/config/test_cc_disable_ec2_metadata.py b/tests/unittests/config/test_cc_disable_ec2_metadata.py
index 5755e29e..dedfd187 100644
--- a/tests/unittests/config/test_cc_disable_ec2_metadata.py
+++ b/tests/unittests/config/test_cc_disable_ec2_metadata.py
@@ -2,7 +2,6 @@
"""Tests cc_disable_ec2_metadata handler"""
-import logging
import pytest
@@ -14,8 +13,6 @@ from cloudinit.config.schema import (
)
from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
-LOG = logging.getLogger(__name__)
-
DISABLE_CFG = {"disable_ec2_metadata": "true"}
@@ -25,7 +22,7 @@ class TestEC2MetadataRoute(CiTestCase):
def test_disable_ifconfig(self, m_subp, m_which):
"""Set the route if ifconfig command is available"""
m_which.side_effect = lambda x: x if x == "ifconfig" else None
- ec2_meta.handle("foo", DISABLE_CFG, None, LOG, None)
+ ec2_meta.handle("foo", DISABLE_CFG, None, None)
m_subp.assert_called_with(
["route", "add", "-host", "169.254.169.254", "reject"],
capture=False,
@@ -36,7 +33,7 @@ class TestEC2MetadataRoute(CiTestCase):
def test_disable_ip(self, m_subp, m_which):
"""Set the route if ip command is available"""
m_which.side_effect = lambda x: x if x == "ip" else None
- ec2_meta.handle("foo", DISABLE_CFG, None, LOG, None)
+ ec2_meta.handle("foo", DISABLE_CFG, None, None)
m_subp.assert_called_with(
["ip", "route", "add", "prohibit", "169.254.169.254"],
capture=False,
@@ -47,7 +44,7 @@ class TestEC2MetadataRoute(CiTestCase):
def test_disable_no_tool(self, m_subp, m_which):
"""Log error when neither route nor ip commands are available"""
m_which.return_value = None # Find neither ifconfig nor ip
- ec2_meta.handle("foo", DISABLE_CFG, None, LOG, None)
+ ec2_meta.handle("foo", DISABLE_CFG, None, None)
self.assertEqual(
[mock.call("ip"), mock.call("ifconfig")], m_which.call_args_list
)
diff --git a/tests/unittests/config/test_cc_disk_setup.py b/tests/unittests/config/test_cc_disk_setup.py
index 496ad8e1..39314313 100644
--- a/tests/unittests/config/test_cc_disk_setup.py
+++ b/tests/unittests/config/test_cc_disk_setup.py
@@ -75,7 +75,7 @@ class TestGetMbrHddSize(TestCase):
return hdd_size_in_bytes, None
elif "--getss" in cmd:
return sector_size_in_bytes, None
- raise Exception("Unexpected blockdev command called")
+ raise RuntimeError("Unexpected blockdev command called")
self.subp.side_effect = _subp
diff --git a/tests/unittests/config/test_cc_final_message.py b/tests/unittests/config/test_cc_final_message.py
index 46ba99b2..191915d3 100644
--- a/tests/unittests/config/test_cc_final_message.py
+++ b/tests/unittests/config/test_cc_final_message.py
@@ -1,5 +1,4 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import logging
from unittest import mock
import pytest
@@ -36,7 +35,7 @@ class TestHandle:
paths=mock.Mock(boot_finished=boot_finished.strpath)
)
- handle(None, {}, m_cloud, logging.getLogger(), [])
+ handle(None, {}, m_cloud, [])
# We should not change the status of the instance directory
assert instance_dir_exists == instance_dir.exists()
diff --git a/tests/unittests/config/test_cc_growpart.py b/tests/unittests/config/test_cc_growpart.py
index 13622332..f9bee391 100644
--- a/tests/unittests/config/test_cc_growpart.py
+++ b/tests/unittests/config/test_cc_growpart.py
@@ -102,7 +102,6 @@ class TestDisabled(unittest.TestCase):
super(TestDisabled, self).setUp()
self.name = "growpart"
self.cloud = None
- self.log = logging.getLogger("TestDisabled")
self.args = []
self.handle = cc_growpart.handle
@@ -114,7 +113,7 @@ class TestDisabled(unittest.TestCase):
config = {"growpart": {"mode": "off"}}
with mock.patch.object(cc_growpart, "resizer_factory") as mockobj:
- self.handle(self.name, config, self.cloud, self.log, self.args)
+ self.handle(self.name, config, self.cloud, self.args)
self.assertEqual(mockobj.call_count, 0)
@@ -144,7 +143,7 @@ class TestConfig(TestCase):
) as mockobj:
config = {"growpart": {"mode": "auto"}}
- self.handle(self.name, config, self.cloud, self.log, self.args)
+ self.handle(self.name, config, self.cloud, self.args)
mockobj.assert_has_calls(
[
@@ -167,7 +166,6 @@ class TestConfig(TestCase):
self.name,
config,
self.cloud,
- self.log,
self.args,
)
@@ -271,7 +269,7 @@ class TestConfig(TestCase):
)
)
- self.handle(self.name, {}, self.cloud, self.log, self.args)
+ self.handle(self.name, {}, self.cloud, self.args)
factory.assert_called_once_with("auto", self.distro)
rsdevs.assert_called_once_with(myresizer, ["/"])
@@ -381,7 +379,7 @@ class TestEncrypted:
return "/dev/vdz"
elif value.startswith("/dev"):
return value
- raise Exception(f"unexpected value {value}")
+ raise RuntimeError(f"unexpected value {value}")
def _realpath_side_effect(self, value):
return "/dev/dm-1" if value.startswith("/dev/mapper") else value
diff --git a/tests/unittests/config/test_cc_grub_dpkg.py b/tests/unittests/config/test_cc_grub_dpkg.py
index aa076d19..b4bd48df 100644
--- a/tests/unittests/config/test_cc_grub_dpkg.py
+++ b/tests/unittests/config/test_cc_grub_dpkg.py
@@ -1,6 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from logging import Logger
from unittest import mock
import pytest
@@ -11,7 +10,7 @@ from cloudinit.config.schema import (
get_schema,
validate_cloudconfig_schema,
)
-from cloudinit.subp import ProcessExecutionError
+from cloudinit.subp import ProcessExecutionError, SubpResult
from tests.unittests.helpers import does_not_raise, skipUnlessJsonSchema
@@ -21,7 +20,7 @@ class TestFetchIdevs:
# Note: udevadm info returns devices in a large single line string
@pytest.mark.parametrize(
"grub_output,path_exists,expected_log_call,udevadm_output"
- ",expected_idevs",
+ ",expected_idevs,is_efi_boot",
[
# Inside a container, grub not installed
(
@@ -30,6 +29,7 @@ class TestFetchIdevs:
mock.call("'grub-probe' not found in $PATH"),
"",
"",
+ False,
),
# Inside a container, grub installed
(
@@ -38,10 +38,11 @@ class TestFetchIdevs:
mock.call("grub-probe 'failed to get canonical path'"),
"",
"",
+ False,
),
# KVM Instance
(
- ["/dev/vda"],
+ SubpResult("/dev/vda", ""),
True,
None,
(
@@ -49,18 +50,20 @@ class TestFetchIdevs:
"/dev/disk/by-path/virtio-pci-0000:00:00.0 ",
),
"/dev/vda",
+ False,
),
# Xen Instance
(
- ["/dev/xvda"],
+ SubpResult("/dev/xvda", ""),
True,
None,
"",
"/dev/xvda",
+ False,
),
# NVMe Hardware Instance
(
- ["/dev/nvme1n1"],
+ SubpResult("/dev/nvme1n1", ""),
True,
None,
(
@@ -69,10 +72,11 @@ class TestFetchIdevs:
"/dev/disk/by-path/pci-0000:00:00.0-nvme-0 ",
),
"/dev/disk/by-id/nvme-Company_hash000",
+ False,
),
# SCSI Hardware Instance
(
- ["/dev/sda"],
+ SubpResult("/dev/sda", ""),
True,
None,
(
@@ -81,38 +85,71 @@ class TestFetchIdevs:
"/dev/disk/by-path/pci-0000:00:00.0-scsi-0:0:0:0 ",
),
"/dev/disk/by-id/company-user-1",
+ False,
+ ),
+ # UEFI Hardware Instance
+ (
+ SubpResult("/dev/sda2", ""),
+ True,
+ None,
+ (
+ "/dev/disk/by-id/scsi-3500a075116e6875a "
+ "/dev/disk/by-id/scsi-SATA_Crucial_CT525MX3_171816E6875A "
+ "/dev/disk/by-id/scsi-0ATA_Crucial_CT525MX3_171816E6875A "
+ "/dev/disk/by-path/pci-0000:00:17.0-ata-1 "
+ "/dev/disk/by-id/wwn-0x500a075116e6875a "
+ "/dev/disk/by-id/ata-Crucial_CT525MX300SSD1_171816E6875A"
+ ),
+ "/dev/disk/by-id/ata-Crucial_CT525MX300SSD1_171816E6875A-"
+ "part1",
+ True,
),
],
)
+ @mock.patch("cloudinit.config.cc_grub_dpkg.is_efi_booted")
@mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
@mock.patch("cloudinit.config.cc_grub_dpkg.os.path.exists")
@mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.LOG")
def test_fetch_idevs(
self,
+ m_log,
m_subp,
m_exists,
m_logexc,
+ m_efi_booted,
grub_output,
path_exists,
expected_log_call,
udevadm_output,
expected_idevs,
+ is_efi_boot,
):
- """Tests outputs from grub-probe and udevadm info against grub-dpkg"""
- m_subp.side_effect = [grub_output, ["".join(udevadm_output)]]
+ """Tests outputs from grub-probe and udevadm info against grub_dpkg"""
+ m_subp.side_effect = [
+ grub_output,
+ SubpResult("".join(udevadm_output), ""),
+ ]
m_exists.return_value = path_exists
- log = mock.Mock(spec=Logger)
- idevs = fetch_idevs(log)
- assert expected_idevs == idevs
+ m_efi_booted.return_value = is_efi_boot
+
+ idevs = fetch_idevs()
+
+ if is_efi_boot:
+ assert expected_idevs.startswith(idevs) is True
+ else:
+ assert idevs == expected_idevs
+
if expected_log_call is not None:
- assert expected_log_call in log.debug.call_args_list
+ assert expected_log_call in m_log.debug.call_args_list
class TestHandle:
"""Tests cc_grub_dpkg.handle()"""
@pytest.mark.parametrize(
- "cfg_idevs,cfg_idevs_empty,fetch_idevs_output,expected_log_output",
+ "cfg_idevs,cfg_idevs_empty,fetch_idevs_output,"
+ "expected_log_output,is_uefi",
[
(
# No configuration
@@ -120,9 +157,12 @@ class TestHandle:
None,
"/dev/disk/by-id/nvme-Company_hash000",
(
- "Setting grub debconf-set-selections with ",
- "'/dev/disk/by-id/nvme-Company_hash000','false'",
+ "Setting grub debconf-set-selections with '%s'",
+ "grub-pc grub-pc/install_devices string "
+ "/dev/disk/by-id/nvme-Company_hash000\n"
+ "grub-pc grub-pc/install_devices_empty boolean false\n",
),
+ False,
),
(
# idevs set, idevs_empty unset
@@ -130,9 +170,11 @@ class TestHandle:
None,
"/dev/sda",
(
- "Setting grub debconf-set-selections with ",
- "'/dev/sda','false'",
+ "Setting grub debconf-set-selections with '%s'",
+ "grub-pc grub-pc/install_devices string /dev/sda\n"
+ "grub-pc grub-pc/install_devices_empty boolean false\n",
),
+ False,
),
(
# idevs unset, idevs_empty set
@@ -140,9 +182,11 @@ class TestHandle:
"true",
"/dev/xvda",
(
- "Setting grub debconf-set-selections with ",
- "'/dev/xvda','true'",
+ "Setting grub debconf-set-selections with '%s'",
+ "grub-pc grub-pc/install_devices string /dev/xvda\n"
+ "grub-pc grub-pc/install_devices_empty boolean true\n",
),
+ False,
),
(
# idevs set, idevs_empty set
@@ -150,9 +194,11 @@ class TestHandle:
False,
"/dev/disk/by-id/company-user-1",
(
- "Setting grub debconf-set-selections with ",
- "'/dev/vda','false'",
+ "Setting grub debconf-set-selections with '%s'",
+ "grub-pc grub-pc/install_devices string /dev/vda\n"
+ "grub-pc grub-pc/install_devices_empty boolean false\n",
),
+ False,
),
(
# idevs set, idevs_empty set
@@ -161,17 +207,34 @@ class TestHandle:
True,
"",
(
- "Setting grub debconf-set-selections with ",
- "'/dev/nvme0n1','true'",
+ "Setting grub debconf-set-selections with '%s'",
+ "grub-pc grub-pc/install_devices string /dev/nvme0n1\n"
+ "grub-pc grub-pc/install_devices_empty boolean true\n",
),
+ False,
+ ),
+ (
+ # uefi active, idevs set
+ "/dev/sda1",
+ False,
+ "/dev/sda1",
+ (
+ "Setting grub debconf-set-selections with '%s'",
+ "grub-pc grub-efi/install_devices string /dev/sda1\n",
+ ),
+ True,
),
],
)
@mock.patch("cloudinit.config.cc_grub_dpkg.fetch_idevs")
@mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
@mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.is_efi_booted")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.LOG")
def test_handle(
self,
+ m_log,
+ m_is_efi_booted,
m_subp,
m_logexc,
m_fetch_idevs,
@@ -179,17 +242,19 @@ class TestHandle:
cfg_idevs_empty,
fetch_idevs_output,
expected_log_output,
+ is_uefi,
):
"""Test setting of correct debconf database entries"""
+ m_is_efi_booted.return_value = is_uefi
m_fetch_idevs.return_value = fetch_idevs_output
- log = mock.Mock(spec=Logger)
cfg = {"grub_dpkg": {}}
if cfg_idevs is not None:
cfg["grub_dpkg"]["grub-pc/install_devices"] = cfg_idevs
if cfg_idevs_empty is not None:
cfg["grub_dpkg"]["grub-pc/install_devices_empty"] = cfg_idevs_empty
- handle(mock.Mock(), cfg, mock.Mock(), log, mock.Mock())
- log.debug.assert_called_with("".join(expected_log_output))
+ handle(mock.Mock(), cfg, mock.Mock(), mock.Mock())
+ print(m_log.debug.call_args_list)
+ m_log.debug.assert_called_with(*expected_log_output)
class TestGrubDpkgSchema:
@@ -235,8 +300,8 @@ class TestGrubDpkgSchema:
pytest.raises(
SchemaValidationError,
match=(
- "Cloud config schema deprecations: grub-dpkg:"
- " Deprecated in version 22.2. Use "
+ "Cloud config schema deprecations: grub-dpkg: An alias"
+ " for ``grub_dpkg`` Deprecated in version 22.2. Use "
"``grub_dpkg`` instead."
),
),
diff --git a/tests/unittests/config/test_cc_install_hotplug.py b/tests/unittests/config/test_cc_install_hotplug.py
index e67fce60..66e582c9 100644
--- a/tests/unittests/config/test_cc_install_hotplug.py
+++ b/tests/unittests/config/test_cc_install_hotplug.py
@@ -57,7 +57,7 @@ class TestInstallHotplug:
else:
libexecdir = "/usr/lib/cloud-init"
with mock.patch("os.path.exists", return_value=libexec_exists):
- handle(None, {}, m_cloud, mock.Mock(), None)
+ handle(None, {}, m_cloud, None)
mocks.m_write.assert_called_once_with(
filename=HOTPLUG_UDEV_PATH,
content=HOTPLUG_UDEV_RULES_TEMPLATE.format(
@@ -80,7 +80,7 @@ class TestInstallHotplug:
m_cloud = mock.MagicMock()
m_cloud.datasource.get_supported_events.return_value = {}
- handle(None, {}, m_cloud, mock.Mock(), None)
+ handle(None, {}, m_cloud, None)
assert mocks.m_write.call_args_list == []
assert mocks.m_del.call_args_list == []
assert mocks.m_subp.call_args_list == []
@@ -92,7 +92,7 @@ class TestInstallHotplug:
EventScope.NETWORK: {EventType.HOTPLUG}
}
- handle(None, {}, m_cloud, mock.Mock(), None)
+ handle(None, {}, m_cloud, None)
assert mocks.m_write.call_args_list == []
assert mocks.m_del.call_args_list == []
assert mocks.m_subp.call_args_list == []
@@ -103,7 +103,7 @@ class TestInstallHotplug:
m_cloud = mock.MagicMock()
m_cloud.datasource.get_supported_events.return_value = {}
- handle(None, {}, m_cloud, mock.Mock(), None)
+ handle(None, {}, m_cloud, None)
mocks.m_del.assert_called_with(HOTPLUG_UDEV_PATH)
assert mocks.m_subp.call_args_list == [
mock.call(
@@ -123,7 +123,7 @@ class TestInstallHotplug:
EventScope.NETWORK: {EventType.HOTPLUG}
}
- handle(None, {}, m_cloud, mock.Mock(), None)
+ handle(None, {}, m_cloud, None)
assert mocks.m_del.call_args_list == []
assert mocks.m_write.call_args_list == []
assert mocks.m_subp.call_args_list == []
diff --git a/tests/unittests/config/test_cc_keys_to_console.py b/tests/unittests/config/test_cc_keys_to_console.py
index 61f62e96..5392b867 100644
--- a/tests/unittests/config/test_cc_keys_to_console.py
+++ b/tests/unittests/config/test_cc_keys_to_console.py
@@ -42,7 +42,7 @@ class TestHandle:
m_path_exists.return_value = True
m_subp.return_value = ("", "")
- cc_keys_to_console.handle("name", cfg, mock.Mock(), mock.Mock(), ())
+ cc_keys_to_console.handle("name", cfg, mock.Mock(), ())
assert subp_called == (m_subp.call_count == 1)
diff --git a/tests/unittests/config/test_cc_landscape.py b/tests/unittests/config/test_cc_landscape.py
index b08e3d44..c37f619e 100644
--- a/tests/unittests/config/test_cc_landscape.py
+++ b/tests/unittests/config/test_cc_landscape.py
@@ -42,7 +42,7 @@ class TestLandscape(FilesystemMockingTestCase):
mycloud = get_cloud("ubuntu")
mycloud.distro = mock.MagicMock()
cfg = {"landscape": {}}
- cc_landscape.handle("notimportant", cfg, mycloud, LOG, None)
+ cc_landscape.handle("notimportant", cfg, mycloud, None)
self.assertFalse(mycloud.distro.install_packages.called)
def test_handler_error_on_invalid_landscape_type(self):
@@ -50,7 +50,7 @@ class TestLandscape(FilesystemMockingTestCase):
mycloud = get_cloud("ubuntu")
cfg = {"landscape": "wrongtype"}
with self.assertRaises(RuntimeError) as context_manager:
- cc_landscape.handle("notimportant", cfg, mycloud, LOG, None)
+ cc_landscape.handle("notimportant", cfg, mycloud, None)
self.assertIn(
"'landscape' key existed in config, but not a dict",
str(context_manager.exception),
@@ -68,7 +68,6 @@ class TestLandscape(FilesystemMockingTestCase):
"notimportant",
cfg,
mycloud,
- LOG,
None,
)
self.assertEqual(
@@ -99,7 +98,6 @@ class TestLandscape(FilesystemMockingTestCase):
"notimportant",
cfg,
mycloud,
- LOG,
None,
)
self.assertEqual(
@@ -136,7 +134,6 @@ class TestLandscape(FilesystemMockingTestCase):
"notimportant",
cfg,
mycloud,
- LOG,
None,
)
self.assertEqual(expected, dict(ConfigObj(self.conf)))
@@ -167,7 +164,6 @@ class TestLandscape(FilesystemMockingTestCase):
"notimportant",
cfg,
mycloud,
- LOG,
None,
)
self.assertEqual(expected, dict(ConfigObj(self.conf)))
diff --git a/tests/unittests/config/test_cc_locale.py b/tests/unittests/config/test_cc_locale.py
index d64610b6..a20cf195 100644
--- a/tests/unittests/config/test_cc_locale.py
+++ b/tests/unittests/config/test_cc_locale.py
@@ -47,7 +47,7 @@ class TestLocale(FilesystemMockingTestCase):
with mock.patch("cloudinit.distros.arch.subp.subp") as m_subp:
with mock.patch("cloudinit.distros.arch.LOG.warning") as m_LOG:
- cc_locale.handle("cc_locale", cfg, cc, LOG, [])
+ cc_locale.handle("cc_locale", cfg, cc, [])
m_LOG.assert_called_with(
"Invalid locale_configfile %s, "
"only supported value is "
@@ -67,7 +67,7 @@ class TestLocale(FilesystemMockingTestCase):
"locale": "My.Locale",
}
cc = get_cloud("sles")
- cc_locale.handle("cc_locale", cfg, cc, LOG, [])
+ cc_locale.handle("cc_locale", cfg, cc, [])
if cc.distro.uses_systemd():
locale_conf = cc.distro.systemd_locale_conf_fn
else:
@@ -82,7 +82,7 @@ class TestLocale(FilesystemMockingTestCase):
def test_set_locale_sles_default(self):
cfg = {}
cc = get_cloud("sles")
- cc_locale.handle("cc_locale", cfg, cc, LOG, [])
+ cc_locale.handle("cc_locale", cfg, cc, [])
if cc.distro.uses_systemd():
locale_conf = cc.distro.systemd_locale_conf_fn
@@ -105,7 +105,7 @@ class TestLocale(FilesystemMockingTestCase):
with mock.patch(
"cloudinit.distros.debian.LOCALE_CONF_FN", locale_conf
):
- cc_locale.handle("cc_locale", cfg, cc, LOG, [])
+ cc_locale.handle("cc_locale", cfg, cc, [])
m_subp.assert_called_with(
[
"update-locale",
@@ -123,7 +123,7 @@ class TestLocale(FilesystemMockingTestCase):
with mock.patch.object(cc.distro, "uses_systemd") as m_use_sd:
m_use_sd.return_value = True
with mock.patch(update_sysconfig) as m_update_syscfg:
- cc_locale.handle("cc_locale", cfg, cc, LOG, [])
+ cc_locale.handle("cc_locale", cfg, cc, [])
m_update_syscfg.assert_called_with(
"/etc/locale.conf", {"LANG": "en_US.UTF-8"}
)
diff --git a/tests/unittests/config/test_cc_lxd.py b/tests/unittests/config/test_cc_lxd.py
index 184b586e..42985622 100644
--- a/tests/unittests/config/test_cc_lxd.py
+++ b/tests/unittests/config/test_cc_lxd.py
@@ -53,7 +53,7 @@ class TestLxd(t_help.CiTestCase):
subp.call_args_list = []
install.call_args_list = []
exists.call_args_list = []
- cc_lxd.handle("cc_lxd", lxd_cfg, cc, self.logger, [])
+ cc_lxd.handle("cc_lxd", lxd_cfg, cc, [])
if cmd:
which.assert_called_with(cmd)
# no bridge config, so maybe_cleanup should not be called.
@@ -100,10 +100,10 @@ class TestLxd(t_help.CiTestCase):
cc = get_cloud()
cc.distro = mock.MagicMock()
mock_subp.which.return_value = None
- cc_lxd.handle("cc_lxd", LXD_INIT_CFG, cc, self.logger, [])
+ cc_lxd.handle("cc_lxd", LXD_INIT_CFG, cc, [])
self.assertNotIn("WARN", self.logs.getvalue())
self.assertTrue(cc.distro.install_packages.called)
- cc_lxd.handle("cc_lxd", LXD_INIT_CFG, cc, self.logger, [])
+ cc_lxd.handle("cc_lxd", LXD_INIT_CFG, cc, [])
self.assertFalse(m_maybe_clean.called)
install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
self.assertEqual(sorted(install_pkg), ["lxd", "zfsutils-linux"])
@@ -113,7 +113,7 @@ class TestLxd(t_help.CiTestCase):
def test_no_init_does_nothing(self, mock_subp, m_maybe_clean):
cc = get_cloud()
cc.distro = mock.MagicMock()
- cc_lxd.handle("cc_lxd", {"lxd": {}}, cc, self.logger, [])
+ cc_lxd.handle("cc_lxd", {"lxd": {}}, cc, [])
self.assertFalse(cc.distro.install_packages.called)
self.assertFalse(mock_subp.subp.called)
self.assertFalse(m_maybe_clean.called)
@@ -123,7 +123,7 @@ class TestLxd(t_help.CiTestCase):
def test_no_lxd_does_nothing(self, mock_subp, m_maybe_clean):
cc = get_cloud()
cc.distro = mock.MagicMock()
- cc_lxd.handle("cc_lxd", {"package_update": True}, cc, self.logger, [])
+ cc_lxd.handle("cc_lxd", {"package_update": True}, cc, [])
self.assertFalse(cc.distro.install_packages.called)
self.assertFalse(mock_subp.subp.called)
self.assertFalse(m_maybe_clean.called)
@@ -136,7 +136,6 @@ class TestLxd(t_help.CiTestCase):
"cc_lxd",
{"lxd": {"preseed": '{"chad": True}'}},
cc,
- self.logger,
[],
)
self.assertEqual(
diff --git a/tests/unittests/config/test_cc_mcollective.py b/tests/unittests/config/test_cc_mcollective.py
index a581f9bb..b9f6746b 100644
--- a/tests/unittests/config/test_cc_mcollective.py
+++ b/tests/unittests/config/test_cc_mcollective.py
@@ -149,7 +149,7 @@ class TestHandler(t_help.TestCase):
cc.distro = t_help.mock.MagicMock()
mock_util.load_file.return_value = b""
mycfg = {"mcollective": {"conf": {"loglevel": "debug"}}}
- cc_mcollective.handle("cc_mcollective", mycfg, cc, LOG, [])
+ cc_mcollective.handle("cc_mcollective", mycfg, cc, [])
self.assertTrue(cc.distro.install_packages.called)
install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
self.assertEqual(install_pkg, ("mcollective",))
diff --git a/tests/unittests/config/test_cc_mounts.py b/tests/unittests/config/test_cc_mounts.py
index 0073829a..bd3432a7 100644
--- a/tests/unittests/config/test_cc_mounts.py
+++ b/tests/unittests/config/test_cc_mounts.py
@@ -64,7 +64,7 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
self.mock_existence_of_disk(disk_path)
self.assertEqual(
disk_path,
- cc_mounts.sanitize_devname(disk_path, lambda x: None, mock.Mock()),
+ cc_mounts.sanitize_devname(disk_path, lambda x: None),
)
def test_existent_disk_name_returns_full_path(self):
@@ -73,7 +73,7 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
self.mock_existence_of_disk(disk_path)
self.assertEqual(
disk_path,
- cc_mounts.sanitize_devname(disk_name, lambda x: None, mock.Mock()),
+ cc_mounts.sanitize_devname(disk_name, lambda x: None),
)
def test_existent_meta_disk_is_returned(self):
@@ -82,7 +82,8 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
self.assertEqual(
actual_disk_path,
cc_mounts.sanitize_devname(
- "ephemeral0", lambda x: actual_disk_path, mock.Mock()
+ "ephemeral0",
+ lambda x: actual_disk_path,
),
)
@@ -93,7 +94,8 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
self.assertEqual(
actual_partition_path,
cc_mounts.sanitize_devname(
- "ephemeral0.1", lambda x: disk_name, mock.Mock()
+ "ephemeral0.1",
+ lambda x: disk_name,
),
)
@@ -104,7 +106,8 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
self.assertEqual(
actual_partition_path,
cc_mounts.sanitize_devname(
- "ephemeral0.1", lambda x: disk_name, mock.Mock()
+ "ephemeral0.1",
+ lambda x: disk_name,
),
)
@@ -115,7 +118,8 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
self.assertEqual(
actual_partition_path,
cc_mounts.sanitize_devname(
- "ephemeral0", lambda x: disk_name, mock.Mock()
+ "ephemeral0",
+ lambda x: disk_name,
),
)
@@ -126,27 +130,35 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
self.assertEqual(
actual_partition_path,
cc_mounts.sanitize_devname(
- "ephemeral0.3", lambda x: disk_name, mock.Mock()
+ "ephemeral0.3",
+ lambda x: disk_name,
),
)
def test_transformer_returning_none_returns_none(self):
self.assertIsNone(
cc_mounts.sanitize_devname(
- "ephemeral0", lambda x: None, mock.Mock()
+ "ephemeral0",
+ lambda x: None,
)
)
def test_missing_device_returns_none(self):
self.assertIsNone(
- cc_mounts.sanitize_devname("/dev/sda", None, mock.Mock())
+ cc_mounts.sanitize_devname(
+ "/dev/sda",
+ None,
+ )
)
def test_missing_sys_returns_none(self):
disk_path = "/dev/sda"
self._makedirs(disk_path)
self.assertIsNone(
- cc_mounts.sanitize_devname(disk_path, None, mock.Mock())
+ cc_mounts.sanitize_devname(
+ disk_path,
+ None,
+ )
)
def test_existent_disk_but_missing_partition_returns_none(self):
@@ -154,14 +166,19 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
self.mock_existence_of_disk(disk_path)
self.assertIsNone(
cc_mounts.sanitize_devname(
- "ephemeral0.1", lambda x: disk_path, mock.Mock()
+ "ephemeral0.1",
+ lambda x: disk_path,
)
)
def test_network_device_returns_network_device(self):
disk_path = "netdevice:/path"
self.assertEqual(
- disk_path, cc_mounts.sanitize_devname(disk_path, None, mock.Mock())
+ disk_path,
+ cc_mounts.sanitize_devname(
+ disk_path,
+ None,
+ ),
)
def test_device_aliases_remapping(self):
@@ -170,7 +187,7 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
self.assertEqual(
disk_path,
cc_mounts.sanitize_devname(
- "mydata", lambda x: None, mock.Mock(), {"mydata": disk_path}
+ "mydata", lambda x: None, {"mydata": disk_path}
),
)
@@ -239,7 +256,7 @@ class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase):
m_kernel_version.return_value = (4, 20)
m_get_mount_info.return_value = ["", "xfs"]
- cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
+ cc_mounts.handle(None, self.cc, self.mock_cloud, [])
self.m_subp_subp.assert_has_calls(
[
mock.call(
@@ -258,7 +275,7 @@ class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase):
m_kernel_version.return_value = (3, 18)
m_get_mount_info.return_value = ["", "xfs"]
- cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
+ cc_mounts.handle(None, self.cc, self.mock_cloud, [])
self.m_subp_subp.assert_has_calls(
[
mock.call(
@@ -284,7 +301,7 @@ class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase):
m_kernel_version.return_value = (4, 20)
m_get_mount_info.return_value = ["", "btrfs"]
- cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
+ cc_mounts.handle(None, self.cc, self.mock_cloud, [])
self.m_subp_subp.assert_has_calls(
[
mock.call(
@@ -310,7 +327,7 @@ class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase):
m_kernel_version.return_value = (5, 14)
m_get_mount_info.return_value = ["", "ext4"]
- cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
+ cc_mounts.handle(None, self.cc, self.mock_cloud, [])
self.m_subp_subp.assert_has_calls(
[
mock.call(
@@ -385,7 +402,7 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
"%s\tnone\tswap\tsw,comment=cloudconfig\t0\t0\n"
% (self.swap_path,)
)
- cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
+ cc_mounts.handle(None, {}, self.mock_cloud, [])
with open(cc_mounts.FSTAB_PATH, "r") as fd:
fstab_new_content = fd.read()
self.assertEqual(fstab_expected_content, fstab_new_content)
@@ -400,7 +417,7 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
with open(cc_mounts.FSTAB_PATH, "w") as fd:
fd.write(fstab)
cc = {"swap": ["filename: /swap.img", "size: 512", "maxsize: 512"]}
- cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, [])
+ cc_mounts.handle(None, cc, self.mock_cloud, [])
def test_fstab_no_swap_device(self):
"""Ensure that cloud-init adds a discovered swap partition
@@ -415,7 +432,7 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
with open(cc_mounts.FSTAB_PATH, "w") as fd:
fd.write(fstab_original_content)
- cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
+ cc_mounts.handle(None, {}, self.mock_cloud, [])
with open(cc_mounts.FSTAB_PATH, "r") as fd:
fstab_new_content = fd.read()
@@ -433,7 +450,7 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
with open(cc_mounts.FSTAB_PATH, "w") as fd:
fd.write(fstab_original_content)
- cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
+ cc_mounts.handle(None, {}, self.mock_cloud, [])
with open(cc_mounts.FSTAB_PATH, "r") as fd:
fstab_new_content = fd.read()
@@ -454,7 +471,7 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
with open(cc_mounts.FSTAB_PATH, "w") as fd:
fd.write(fstab_original_content)
- cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
+ cc_mounts.handle(None, {}, self.mock_cloud, [])
with open(cc_mounts.FSTAB_PATH, "r") as fd:
fstab_new_content = fd.read()
@@ -474,7 +491,7 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
with open(cc_mounts.FSTAB_PATH, "r") as fd:
fstab_new_content = fd.read()
self.assertEqual(fstab_expected_content, fstab_new_content)
- cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, [])
+ cc_mounts.handle(None, cc, self.mock_cloud, [])
self.m_subp_subp.assert_has_calls(
[
mock.call(["mount", "-a"]),
diff --git a/tests/unittests/config/test_cc_ntp.py b/tests/unittests/config/test_cc_ntp.py
index 365945f8..f8b71d2b 100644
--- a/tests/unittests/config/test_cc_ntp.py
+++ b/tests/unittests/config/test_cc_ntp.py
@@ -357,7 +357,7 @@ class TestNtp(FilesystemMockingTestCase):
def test_no_ntpcfg_does_nothing(self):
"""When no ntp section is defined handler logs a warning and noops."""
- cc_ntp.handle("cc_ntp", {}, None, None, [])
+ cc_ntp.handle("cc_ntp", {}, None, [])
self.assertEqual(
"DEBUG: Skipping module named cc_ntp, "
"not present or disabled by cfg\n",
@@ -380,7 +380,7 @@ class TestNtp(FilesystemMockingTestCase):
ntpconfig = self._mock_ntp_client_config(distro=distro)
confpath = ntpconfig["confpath"]
m_select.return_value = ntpconfig
- cc_ntp.handle("cc_ntp", valid_empty_config, mycloud, None, [])
+ cc_ntp.handle("cc_ntp", valid_empty_config, mycloud, [])
if distro == "alpine":
# _mock_ntp_client_config call above did not specify a
# client value and so it defaults to "ntp" which on
@@ -415,7 +415,7 @@ class TestNtp(FilesystemMockingTestCase):
)
confpath = ntpconfig["confpath"]
m_select.return_value = ntpconfig
- cc_ntp.handle("cc_ntp", cfg, mycloud, None, [])
+ cc_ntp.handle("cc_ntp", cfg, mycloud, [])
self.assertEqual(
"[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n",
util.load_file(confpath),
@@ -427,7 +427,7 @@ class TestNtp(FilesystemMockingTestCase):
cfg = {"ntp": {"enabled": False}}
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
- cc_ntp.handle("notimportant", cfg, mycloud, None, None)
+ cc_ntp.handle("notimportant", cfg, mycloud, None)
self.assertEqual(0, m_select.call_count)
@mock.patch("cloudinit.subp.subp")
@@ -502,7 +502,7 @@ class TestNtp(FilesystemMockingTestCase):
m_util.is_BSD.return_value = is_FreeBSD or is_OpenBSD
m_util.is_FreeBSD.return_value = is_FreeBSD
m_util.is_OpenBSD.return_value = is_OpenBSD
- cc_ntp.handle("notimportant", cfg, mycloud, None, None)
+ cc_ntp.handle("notimportant", cfg, mycloud, None)
m_subp.assert_called_with(expected_service_call, capture=True)
self.assertEqual(expected_content, util.load_file(confpath))
@@ -610,7 +610,7 @@ class TestNtp(FilesystemMockingTestCase):
with mock.patch.object(
mycloud.distro, "install_packages"
) as m_install:
- cc_ntp.handle("notimportant", cfg, mycloud, None, None)
+ cc_ntp.handle("notimportant", cfg, mycloud, None)
m_install.assert_called_with([client])
m_which.assert_called_with(client)
@@ -667,7 +667,7 @@ class TestNtp(FilesystemMockingTestCase):
mycloud = self._get_cloud(distro)
mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR"
with mock.patch(mock_path, self.new_root):
- cc_ntp.handle("notimportant", cfg, mycloud, None, None)
+ cc_ntp.handle("notimportant", cfg, mycloud, None)
self.assertEqual(
"servers []\npools ['mypool.org']\n%s" % custom,
util.load_file(confpath),
@@ -707,9 +707,7 @@ class TestNtp(FilesystemMockingTestCase):
m_select.return_value = ntpconfig
mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR"
with mock.patch(mock_path, self.new_root):
- cc_ntp.handle(
- "notimportant", {"ntp": cfg}, mycloud, None, None
- )
+ cc_ntp.handle("notimportant", {"ntp": cfg}, mycloud, None)
self.assertEqual(
"servers []\npools ['mypool.org']\n%s" % custom,
util.load_file(confpath),
diff --git a/tests/unittests/config/test_cc_phone_home.py b/tests/unittests/config/test_cc_phone_home.py
index 7964705d..f7547762 100644
--- a/tests/unittests/config/test_cc_phone_home.py
+++ b/tests/unittests/config/test_cc_phone_home.py
@@ -1,4 +1,3 @@
-import logging
from functools import partial
from itertools import count
from unittest import mock
@@ -14,8 +13,7 @@ from cloudinit.config.schema import (
from tests.unittests.helpers import skipUnlessJsonSchema
from tests.unittests.util import get_cloud
-LOG = logging.getLogger("TestNoConfig")
-phone_home = partial(handle, name="test", cloud=get_cloud(), log=LOG, args=[])
+phone_home = partial(handle, name="test", cloud=get_cloud(), args=[])
@pytest.fixture(autouse=True)
diff --git a/tests/unittests/config/test_cc_power_state_change.py b/tests/unittests/config/test_cc_power_state_change.py
index fbdc06ef..a3c391a0 100644
--- a/tests/unittests/config/test_cc_power_state_change.py
+++ b/tests/unittests/config/test_cc_power_state_change.py
@@ -126,11 +126,9 @@ class TestCheckCondition(t_help.TestCase):
def test_cmd_exit_one_false(self):
self.assertEqual(psc.check_condition(self.cmd_with_exit(1)), False)
- def test_cmd_exit_nonzero_warns(self):
- mocklog = mock.Mock()
- self.assertEqual(
- psc.check_condition(self.cmd_with_exit(2), mocklog), False
- )
+ @mock.patch("cloudinit.config.cc_power_state_change.LOG")
+ def test_cmd_exit_nonzero_warns(self, mocklog):
+ self.assertEqual(psc.check_condition(self.cmd_with_exit(2)), False)
self.assertEqual(mocklog.warning.call_count, 1)
@@ -164,7 +162,7 @@ def check_lps_ret(psc_return, mode=None):
if len(errs):
lines = ["Errors in result: %s" % str(psc_return)] + errs
- raise Exception("\n".join(lines))
+ raise RuntimeError("\n".join(lines))
class TestPowerStateChangeSchema:
diff --git a/tests/unittests/config/test_cc_puppet.py b/tests/unittests/config/test_cc_puppet.py
index 23461c2b..9c55e9b5 100644
--- a/tests/unittests/config/test_cc_puppet.py
+++ b/tests/unittests/config/test_cc_puppet.py
@@ -1,5 +1,4 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import logging
import textwrap
import pytest
@@ -16,8 +15,6 @@ from cloudinit.subp import ProcessExecutionError
from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
from tests.unittests.util import get_cloud
-LOG = logging.getLogger(__name__)
-
@pytest.fixture
def fake_tempdir(mocker, tmpdir):
@@ -36,7 +33,7 @@ class TestManagePuppetServices(CiTestCase):
self,
m_subp,
):
- cc_puppet._manage_puppet_services(LOG, self.cloud, "enable")
+ cc_puppet._manage_puppet_services(self.cloud, "enable")
expected_calls = [
mock.call(
["systemctl", "enable", "puppet-agent.service"],
@@ -49,7 +46,7 @@ class TestManagePuppetServices(CiTestCase):
self,
m_subp,
):
- cc_puppet._manage_puppet_services(LOG, self.cloud, "start")
+ cc_puppet._manage_puppet_services(self.cloud, "start")
expected_calls = [
mock.call(
["systemctl", "start", "puppet-agent.service"],
@@ -60,7 +57,7 @@ class TestManagePuppetServices(CiTestCase):
def test_enable_fallback_on_failure(self, m_subp):
m_subp.side_effect = (ProcessExecutionError, 0)
- cc_puppet._manage_puppet_services(LOG, self.cloud, "enable")
+ cc_puppet._manage_puppet_services(self.cloud, "enable")
expected_calls = [
mock.call(
["systemctl", "enable", "puppet-agent.service"],
@@ -90,7 +87,7 @@ class TestPuppetHandle(CiTestCase):
"""Cloud-config containing no 'puppet' key is skipped."""
cfg = {}
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
self.assertIn("no 'puppet' configuration found", self.logs.getvalue())
self.assertEqual(0, m_man_puppet.call_count)
@@ -99,11 +96,11 @@ class TestPuppetHandle(CiTestCase):
"""Cloud-config 'puppet' configuration starts puppet."""
cfg = {"puppet": {"install": False}}
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
self.assertEqual(2, m_man_puppet.call_count)
expected_calls = [
- mock.call(LOG, self.cloud, "enable"),
- mock.call(LOG, self.cloud, "start"),
+ mock.call(self.cloud, "enable"),
+ mock.call(self.cloud, "start"),
]
self.assertEqual(expected_calls, m_man_puppet.call_args_list)
@@ -113,7 +110,7 @@ class TestPuppetHandle(CiTestCase):
self.cloud.distro = mock.MagicMock()
cfg = {"puppet": {}}
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
self.assertEqual(
[mock.call(("puppet-agent", None))],
self.cloud.distro.install_packages.call_args_list,
@@ -125,7 +122,7 @@ class TestPuppetHandle(CiTestCase):
self.cloud.distro = mock.MagicMock()
cfg = {"puppet": {"install": True}}
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
self.assertIn(
[mock.call(("puppet-agent", None))],
self.cloud.distro.install_packages.call_args_list,
@@ -139,7 +136,7 @@ class TestPuppetHandle(CiTestCase):
distro = mock.MagicMock()
self.cloud.distro = distro
cfg = {"puppet": {"install": True, "install_type": "aio"}}
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
m_aio.assert_called_with(
distro, cc_puppet.AIO_INSTALL_URL, None, None, True
)
@@ -160,7 +157,7 @@ class TestPuppetHandle(CiTestCase):
"install_type": "aio",
}
}
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
m_aio.assert_called_with(
distro, cc_puppet.AIO_INSTALL_URL, "6.24.0", None, True
)
@@ -181,7 +178,7 @@ class TestPuppetHandle(CiTestCase):
"install_type": "aio",
}
}
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
m_aio.assert_called_with(
distro, cc_puppet.AIO_INSTALL_URL, None, "puppet6", True
)
@@ -202,7 +199,7 @@ class TestPuppetHandle(CiTestCase):
"install_type": "aio",
}
}
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
m_aio.assert_called_with(
distro, "http://test.url/path/to/script.sh", None, None, True
)
@@ -223,7 +220,7 @@ class TestPuppetHandle(CiTestCase):
"install_type": "aio",
}
}
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
m_aio.assert_called_with(
distro, cc_puppet.AIO_INSTALL_URL, None, None, False
)
@@ -234,7 +231,7 @@ class TestPuppetHandle(CiTestCase):
self.cloud.distro = mock.MagicMock()
cfg = {"puppet": {"version": "3.8"}}
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
self.assertEqual(
[mock.call(("puppet-agent", "3.8"))],
self.cloud.distro.install_packages.call_args_list,
@@ -259,7 +256,7 @@ class TestPuppetHandle(CiTestCase):
}
util.write_file(self.conf, "[agent]\nserver = origpuppet\nother = 3")
self.cloud.distro = mock.MagicMock()
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
content = util.load_file(self.conf)
expected = "[agent]\nserver = puppetserver.example.org\nother = 3\n\n"
self.assertEqual(expected, content)
@@ -296,7 +293,7 @@ class TestPuppetHandle(CiTestCase):
}
}
}
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
content = util.load_file(self.csr_attributes_path)
expected = textwrap.dedent(
"""\
@@ -315,11 +312,11 @@ class TestPuppetHandle(CiTestCase):
"""Run puppet with default args if 'exec' is set to True."""
cfg = {"puppet": {"exec": True}}
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
self.assertEqual(2, m_man_puppet.call_count)
expected_calls = [
- mock.call(LOG, self.cloud, "enable"),
- mock.call(LOG, self.cloud, "start"),
+ mock.call(self.cloud, "enable"),
+ mock.call(self.cloud, "start"),
]
self.assertEqual(expected_calls, m_man_puppet.call_args_list)
self.assertIn(
@@ -332,11 +329,11 @@ class TestPuppetHandle(CiTestCase):
"""Run puppet with default args if 'exec' is set to True."""
cfg = {"puppet": {}}
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
self.assertEqual(2, m_man_puppet.call_count)
expected_calls = [
- mock.call(LOG, self.cloud, "enable"),
- mock.call(LOG, self.cloud, "start"),
+ mock.call(self.cloud, "enable"),
+ mock.call(self.cloud, "start"),
]
self.assertEqual(expected_calls, m_man_puppet.call_args_list)
@@ -345,7 +342,7 @@ class TestPuppetHandle(CiTestCase):
"""Run puppet with default args if 'exec' is set to True."""
cfg = {"puppet": {"start_service": False}}
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
self.assertEqual(0, m_man_puppet.call_count)
self.assertNotIn(
[mock.call(["systemctl", "start", "puppet-agent"], capture=False)],
@@ -364,7 +361,7 @@ class TestPuppetHandle(CiTestCase):
"exec_args": ["--onetime", "--detailed-exitcodes"],
}
}
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
self.assertEqual(2, m_man_puppet.call_count)
self.assertIn(
[
@@ -388,7 +385,7 @@ class TestPuppetHandle(CiTestCase):
"exec_args": "--onetime --detailed-exitcodes",
}
}
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
self.assertEqual(2, m_man_puppet.call_count)
self.assertIn(
[
@@ -409,10 +406,10 @@ class TestPuppetHandle(CiTestCase):
# puppet-agent not installed, but puppet is
install_pkg.side_effect = (ProcessExecutionError, 0)
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
expected_calls = [
- mock.call(LOG, self.cloud, "enable"),
- mock.call(LOG, self.cloud, "start"),
+ mock.call(self.cloud, "enable"),
+ mock.call(self.cloud, "start"),
]
self.assertEqual(expected_calls, m_man_puppet.call_args_list)
@@ -425,7 +422,7 @@ class TestPuppetHandle(CiTestCase):
# puppet-agent not installed, but puppet is
install_pkg.side_effect = (ProcessExecutionError, 0)
with pytest.raises(ProcessExecutionError):
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
self.assertEqual(0, m_man_puppet.call_count)
self.assertNotIn(
[
@@ -439,7 +436,7 @@ class TestPuppetHandle(CiTestCase):
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
def test_puppet_with_conf_package_name_success(self, m_subp, m_man_puppet):
cfg = {"puppet": {"package_name": "puppet"}}
- cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ cc_puppet.handle("notimportant", cfg, self.cloud, None)
self.assertEqual(2, m_man_puppet.call_count)
diff --git a/tests/unittests/config/test_cc_refresh_rmc_and_interface.py b/tests/unittests/config/test_cc_refresh_rmc_and_interface.py
index e038f814..6813320a 100644
--- a/tests/unittests/config/test_cc_refresh_rmc_and_interface.py
+++ b/tests/unittests/config/test_cc_refresh_rmc_and_interface.py
@@ -110,7 +110,6 @@ class TestRsctNodeFile(t_help.CiTestCase):
util.load_file(fname),
)
- @mock.patch(MPATH + ".refresh_rmc")
@mock.patch(MPATH + ".restart_network_manager")
@mock.patch(MPATH + ".disable_ipv6")
@mock.patch(MPATH + ".refresh_ipv6")
@@ -123,12 +122,14 @@ class TestRsctNodeFile(t_help.CiTestCase):
m_refresh_ipv6,
m_disable_ipv6,
m_restart_nm,
- m_which,
):
- """Basic test of handle."""
+ """Basic test of handle.
+
+ TODO: This test has suspicious mock names, is it actually testing the
+ correct things?
+ """
m_netdev_info.return_value = NET_INFO
- m_which.return_value = "/opt/rsct/bin/rmcctrl"
- ccrmci.handle("refresh_rmc_and_interface", None, None, None, None)
+ ccrmci.handle("refresh_rmc_and_interface", None, None, None)
self.assertEqual(1, m_netdev_info.call_count)
m_refresh_ipv6.assert_called_with("env5")
m_disable_ipv6.assert_called_with(
diff --git a/tests/unittests/config/test_cc_resizefs.py b/tests/unittests/config/test_cc_resizefs.py
index d7fda1b8..90814e61 100644
--- a/tests/unittests/config/test_cc_resizefs.py
+++ b/tests/unittests/config/test_cc_resizefs.py
@@ -83,37 +83,42 @@ class TestResizefs(CiTestCase):
def test_handle_noops_on_disabled(self):
"""The handle function logs when the configuration disables resize."""
cfg = {"resize_rootfs": False}
- handle("cc_resizefs", cfg, cloud=None, log=LOG, args=[])
+ handle("cc_resizefs", cfg, cloud=None, args=[])
self.assertIn(
"DEBUG: Skipping module named cc_resizefs, resizing disabled\n",
self.logs.getvalue(),
)
@mock.patch("cloudinit.config.cc_resizefs.util.get_mount_info")
- def test_handle_warns_on_unknown_mount_info(self, m_get_mount_info):
+ @mock.patch("cloudinit.config.cc_resizefs.LOG")
+ def test_handle_warns_on_unknown_mount_info(self, m_log, m_get_mount_info):
"""handle warns when get_mount_info sees unknown filesystem for /."""
m_get_mount_info.return_value = None
cfg = {"resize_rootfs": True}
- handle("cc_resizefs", cfg, cloud=None, log=LOG, args=[])
+ handle("cc_resizefs", cfg, cloud=None, args=[])
logs = self.logs.getvalue()
self.assertNotIn(
"WARNING: Invalid cloud-config provided:\nresize_rootfs:", logs
)
- self.assertIn(
- "WARNING: Could not determine filesystem type of /\n", logs
+ self.assertEqual(
+ ("Could not determine filesystem type of %s", "/"),
+ m_log.warning.call_args[0],
)
self.assertEqual(
- [mock.call("/", LOG)], m_get_mount_info.call_args_list
+ [mock.call("/", m_log)], m_get_mount_info.call_args_list
)
- def test_handle_warns_on_undiscoverable_root_path_in_commandline(self):
+ @mock.patch("cloudinit.config.cc_resizefs.LOG")
+ def test_handle_warns_on_undiscoverable_root_path_in_commandline(
+ self, m_log
+ ):
"""handle noops when the root path is not found on the commandline."""
cfg = {"resize_rootfs": True}
exists_mock_path = "cloudinit.config.cc_resizefs.os.path.exists"
def fake_mount_info(path, log):
self.assertEqual("/", path)
- self.assertEqual(LOG, log)
+ self.assertEqual(m_log, log)
return ("/dev/root", "ext4", "/")
with mock.patch(exists_mock_path) as m_exists:
@@ -129,11 +134,11 @@ class TestResizefs(CiTestCase):
"cc_resizefs",
cfg,
cloud=None,
- log=LOG,
args=[],
)
- logs = self.logs.getvalue()
- self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
+ self.assertIn(
+ "Unable to find device '/dev/root'", m_log.warning.call_args[0]
+ )
def test_resize_zfs_cmd_return(self):
zpool = "zroot"
@@ -183,8 +188,8 @@ class TestResizefs(CiTestCase):
cfg = {"resize_rootfs": True}
with mock.patch("cloudinit.config.cc_resizefs.do_resize") as dresize:
- handle("cc_resizefs", cfg, cloud=None, log=LOG, args=[])
- ret = dresize.call_args[0][0]
+ handle("cc_resizefs", cfg, cloud=None, args=[])
+ ret = dresize.call_args[0]
self.assertEqual(("zpool", "online", "-e", "vmzroot", disk), ret)
@@ -217,11 +222,10 @@ class TestResizefs(CiTestCase):
with mock.patch("cloudinit.config.cc_resizefs.do_resize") as dresize:
with mock.patch("cloudinit.config.cc_resizefs.os.stat") as m_stat:
m_stat.side_effect = fake_stat
- handle("cc_resizefs", cfg, cloud=None, log=LOG, args=[])
-
+ handle("cc_resizefs", cfg, cloud=None, args=[])
self.assertEqual(
("zpool", "online", "-e", "zroot", "/dev/" + disk),
- dresize.call_args[0][0],
+ dresize.call_args[0],
)
@@ -276,7 +280,6 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
maybe_get_writable_device_path,
"overlayroot",
info,
- LOG,
)
self.assertIsNone(devpath)
self.assertIn(
@@ -306,7 +309,6 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
maybe_get_writable_device_path,
"/dev/root",
info,
- LOG,
)
self.assertIsNone(devpath)
logs = self.logs.getvalue()
@@ -321,7 +323,6 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
maybe_get_writable_device_path,
"/dev/I/dont/exist",
info,
- LOG,
)
self.assertIsNone(devpath)
self.assertIn(
@@ -339,7 +340,6 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
maybe_get_writable_device_path,
"/dev/I/dont/exist",
info,
- LOG,
)
self.assertIsNone(devpath)
self.assertIn(
@@ -363,7 +363,6 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
maybe_get_writable_device_path,
"/dev/I/dont/exist",
info,
- LOG,
)
self.assertEqual(
"Something unexpected", str(context_manager.exception)
@@ -381,7 +380,6 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
maybe_get_writable_device_path,
fake_devpath,
info,
- LOG,
)
self.assertIsNone(devpath)
self.assertIn(
@@ -403,7 +401,6 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
maybe_get_writable_device_path,
fake_devpath,
info,
- LOG,
)
self.assertIsNone(devpath)
self.assertIn(
@@ -433,7 +430,6 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
maybe_get_writable_device_path,
"/dev/root",
info,
- LOG,
)
self.assertEqual("/dev/disk/by-uuid/my-uuid", devpath)
self.assertIn(
@@ -490,7 +486,7 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
):
freebsd.return_value = True
info = "dev=gpt/system mnt_point=/ path=/"
- devpth = maybe_get_writable_device_path("gpt/system", info, LOG)
+ devpth = maybe_get_writable_device_path("gpt/system", info)
self.assertEqual("gpt/system", devpth)
diff --git a/tests/unittests/config/test_cc_resolv_conf.py b/tests/unittests/config/test_cc_resolv_conf.py
index 4ae9b3f3..3b32161e 100644
--- a/tests/unittests/config/test_cc_resolv_conf.py
+++ b/tests/unittests/config/test_cc_resolv_conf.py
@@ -55,7 +55,7 @@ class TestResolvConf(FilesystemMockingTestCase):
distro = self._fetch_distro(distro_name, conf)
paths = helpers.Paths({"cloud_dir": self.tmp})
cc = cloud.Cloud(ds, paths, {}, distro, None)
- cc_resolv_conf.handle("cc_resolv_conf", conf, cc, LOG, [])
+ cc_resolv_conf.handle("cc_resolv_conf", conf, cc, [])
@mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
def test_resolv_conf_systemd_resolved(self, m_render_to_file):
diff --git a/tests/unittests/config/test_cc_rh_subscription.py b/tests/unittests/config/test_cc_rh_subscription.py
index 1a9c1579..87a6015e 100644
--- a/tests/unittests/config/test_cc_rh_subscription.py
+++ b/tests/unittests/config/test_cc_rh_subscription.py
@@ -55,9 +55,7 @@ class GoodTests(CiTestCase):
Emulates a system that is already registered. Ensure it gets
a non-ProcessExecution error from is_registered()
"""
- self.handle(
- self.name, self.config, self.cloud_init, self.log, self.args
- )
+ self.handle(self.name, self.config, self.cloud_init, self.args)
self.assertEqual(m_sman_cli.call_count, 1)
self.assertIn("System is already registered", self.logs.getvalue())
@@ -70,9 +68,7 @@ class GoodTests(CiTestCase):
" 12345678-abde-abcde-1234-1234567890abc"
)
m_sman_cli.side_effect = [subp.ProcessExecutionError, (reg, "bar")]
- self.handle(
- self.name, self.config, self.cloud_init, self.log, self.args
- )
+ self.handle(self.name, self.config, self.cloud_init, self.args)
self.assertIn(mock.call(["identity"]), m_sman_cli.call_args_list)
self.assertIn(
mock.call(
@@ -131,9 +127,7 @@ class GoodTests(CiTestCase):
("Repo ID: repo2\nRepo ID: repo3\nRepo ID: repo4", ""),
("", ""),
]
- self.handle(
- self.name, self.config_full, self.cloud_init, self.log, self.args
- )
+ self.handle(self.name, self.config_full, self.cloud_init, self.args)
self.assertEqual(m_sman_cli.call_count, 9)
for call in call_lists:
self.assertIn(mock.call(call), m_sman_cli.call_args_list)
@@ -213,7 +207,6 @@ class TestBadInput(CiTestCase):
self.name,
self.config_no_password,
self.cloud_init,
- self.log,
self.args,
)
self.assertEqual(m_sman_cli.call_count, 0)
@@ -221,9 +214,7 @@ class TestBadInput(CiTestCase):
def test_no_org(self, m_sman_cli):
"""Attempt to register without the org key/value."""
m_sman_cli.side_effect = [subp.ProcessExecutionError]
- self.handle(
- self.name, self.config_no_key, self.cloud_init, self.log, self.args
- )
+ self.handle(self.name, self.config_no_key, self.cloud_init, self.args)
m_sman_cli.assert_called_with(["identity"])
self.assertEqual(m_sman_cli.call_count, 1)
self.assert_logged_warnings(
@@ -245,7 +236,6 @@ class TestBadInput(CiTestCase):
self.name,
self.config_service,
self.cloud_init,
- self.log,
self.args,
)
self.assertEqual(m_sman_cli.call_count, 1)
@@ -268,7 +258,6 @@ class TestBadInput(CiTestCase):
self.name,
self.config_badpool,
self.cloud_init,
- self.log,
self.args,
)
self.assertEqual(m_sman_cli.call_count, 2)
@@ -291,7 +280,6 @@ class TestBadInput(CiTestCase):
self.name,
self.config_badrepo,
self.cloud_init,
- self.log,
self.args,
)
self.assertEqual(m_sman_cli.call_count, 2)
@@ -311,9 +299,7 @@ class TestBadInput(CiTestCase):
subp.ProcessExecutionError,
(self.reg, "bar"),
]
- self.handle(
- self.name, self.config_badkey, self.cloud_init, self.log, self.args
- )
+ self.handle(self.name, self.config_badkey, self.cloud_init, self.args)
self.assertEqual(m_sman_cli.call_count, 1)
self.assert_logged_warnings(
(
diff --git a/tests/unittests/config/test_cc_runcmd.py b/tests/unittests/config/test_cc_runcmd.py
index ab5733a7..9fdc575b 100644
--- a/tests/unittests/config/test_cc_runcmd.py
+++ b/tests/unittests/config/test_cc_runcmd.py
@@ -37,7 +37,7 @@ class TestRuncmd(FilesystemMockingTestCase):
"""When the provided config doesn't contain runcmd, skip it."""
cfg = {}
mycloud = get_cloud(paths=self.paths)
- handle("notimportant", cfg, mycloud, LOG, None)
+ handle("notimportant", cfg, mycloud, None)
self.assertIn(
"Skipping module named notimportant, no 'runcmd' key",
self.logs.getvalue(),
@@ -51,7 +51,7 @@ class TestRuncmd(FilesystemMockingTestCase):
cc = get_cloud(paths=self.paths)
with self.assertRaises(TypeError) as cm:
with self.allow_subp(["/bin/sh"]):
- handle("cc_runcmd", valid_config, cc, LOG, None)
+ handle("cc_runcmd", valid_config, cc, None)
self.assertIn("Failed to shellify", str(cm.exception))
def test_handler_invalid_command_set(self):
@@ -59,7 +59,7 @@ class TestRuncmd(FilesystemMockingTestCase):
invalid_config = {"runcmd": 1}
cc = get_cloud(paths=self.paths)
with self.assertRaises(TypeError) as cm:
- handle("cc_runcmd", invalid_config, cc, LOG, [])
+ handle("cc_runcmd", invalid_config, cc, [])
self.assertIn(
"Failed to shellify 1 into file"
" /var/lib/cloud/instances/iid-datasource-none/scripts/runcmd",
@@ -70,7 +70,7 @@ class TestRuncmd(FilesystemMockingTestCase):
"""Valid runcmd schema is written to a runcmd shell script."""
valid_config = {"runcmd": [["ls", "/"]]}
cc = get_cloud(paths=self.paths)
- handle("cc_runcmd", valid_config, cc, LOG, [])
+ handle("cc_runcmd", valid_config, cc, [])
runcmd_file = os.path.join(
self.new_root,
"var/lib/cloud/instances/iid-datasource-none/scripts/runcmd",
diff --git a/tests/unittests/config/test_cc_seed_random.py b/tests/unittests/config/test_cc_seed_random.py
index 0f43d858..760e3b9a 100644
--- a/tests/unittests/config/test_cc_seed_random.py
+++ b/tests/unittests/config/test_cc_seed_random.py
@@ -71,7 +71,7 @@ class TestRandomSeed(TestCase):
"data": "tiny-tim-was-here",
}
}
- cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, [])
+ cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), [])
contents = util.load_file(self._seed_file)
self.assertEqual("tiny-tim-was-here", contents)
@@ -90,7 +90,6 @@ class TestRandomSeed(TestCase):
"test",
cfg,
get_cloud("ubuntu"),
- LOG,
[],
)
@@ -103,7 +102,7 @@ class TestRandomSeed(TestCase):
"encoding": "gzip",
}
}
- cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, [])
+ cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), [])
contents = util.load_file(self._seed_file)
self.assertEqual("tiny-toe", contents)
@@ -116,7 +115,7 @@ class TestRandomSeed(TestCase):
"encoding": "gz",
}
}
- cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, [])
+ cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), [])
contents = util.load_file(self._seed_file)
self.assertEqual("big-toe", contents)
@@ -129,7 +128,7 @@ class TestRandomSeed(TestCase):
"encoding": "base64",
}
}
- cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, [])
+ cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), [])
contents = util.load_file(self._seed_file)
self.assertEqual("bubbles", contents)
@@ -142,7 +141,7 @@ class TestRandomSeed(TestCase):
"encoding": "b64",
}
}
- cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, [])
+ cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), [])
contents = util.load_file(self._seed_file)
self.assertEqual("kit-kat", contents)
@@ -154,7 +153,7 @@ class TestRandomSeed(TestCase):
}
}
c = get_cloud("ubuntu", metadata={"random_seed": "-so-was-josh"})
- cc_seed_random.handle("test", cfg, c, LOG, [])
+ cc_seed_random.handle("test", cfg, c, [])
contents = util.load_file(self._seed_file)
self.assertEqual("tiny-tim-was-here-so-was-josh", contents)
@@ -162,7 +161,7 @@ class TestRandomSeed(TestCase):
c = get_cloud("ubuntu")
self.whichdata = {"pollinate": "/usr/bin/pollinate"}
cfg = {"random_seed": {"command": ["pollinate", "-q"]}}
- cc_seed_random.handle("test", cfg, c, LOG, [])
+ cc_seed_random.handle("test", cfg, c, [])
subp_args = [f["args"] for f in self.subp_called]
self.assertIn(["pollinate", "-q"], subp_args)
@@ -170,7 +169,7 @@ class TestRandomSeed(TestCase):
def test_seed_command_not_provided(self):
c = get_cloud("ubuntu")
self.whichdata = {}
- cc_seed_random.handle("test", {}, c, LOG, [])
+ cc_seed_random.handle("test", {}, c, [])
# subp should not have been called as which would say not available
self.assertFalse(self.subp_called)
@@ -185,14 +184,14 @@ class TestRandomSeed(TestCase):
}
}
self.assertRaises(
- ValueError, cc_seed_random.handle, "test", cfg, c, LOG, []
+ ValueError, cc_seed_random.handle, "test", cfg, c, []
)
def test_seed_command_and_required(self):
c = get_cloud("ubuntu")
self.whichdata = {"foo": "foo"}
cfg = {"random_seed": {"command_required": True, "command": ["foo"]}}
- cc_seed_random.handle("test", cfg, c, LOG, [])
+ cc_seed_random.handle("test", cfg, c, [])
self.assertIn(["foo"], [f["args"] for f in self.subp_called])
@@ -206,7 +205,7 @@ class TestRandomSeed(TestCase):
"file": self._seed_file,
}
}
- cc_seed_random.handle("test", cfg, c, LOG, [])
+ cc_seed_random.handle("test", cfg, c, [])
# this just instists that the first time subp was called,
# RANDOM_SEED_FILE was in the environment set up correctly
diff --git a/tests/unittests/config/test_cc_set_hostname.py b/tests/unittests/config/test_cc_set_hostname.py
index 2c92949f..1ba7df92 100644
--- a/tests/unittests/config/test_cc_set_hostname.py
+++ b/tests/unittests/config/test_cc_set_hostname.py
@@ -45,7 +45,7 @@ class TestHostname(t_help.FilesystemMockingTestCase):
ds = None
cc = cloud.Cloud(ds, paths, {}, distro, None)
self.patchUtils(self.tmp)
- cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, [])
contents = util.load_file("/etc/hostname")
self.assertEqual("blah.yahoo.com", contents.strip())
@@ -61,7 +61,7 @@ class TestHostname(t_help.FilesystemMockingTestCase):
ds = None
cc = cloud.Cloud(ds, paths, {}, distro, None)
self.patchUtils(self.tmp)
- cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, [])
contents = util.load_file("/etc/sysconfig/network", decode=False)
n_cfg = ConfigObj(BytesIO(contents))
self.assertEqual({"HOSTNAME": "blah"}, dict(n_cfg))
@@ -74,7 +74,7 @@ class TestHostname(t_help.FilesystemMockingTestCase):
ds = None
cc = cloud.Cloud(ds, paths, {}, distro, None)
self.patchUtils(self.tmp)
- cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, [])
contents = util.load_file("/etc/sysconfig/network", decode=False)
n_cfg = ConfigObj(BytesIO(contents))
self.assertEqual({"HOSTNAME": "blah.blah.blah.yahoo.com"}, dict(n_cfg))
@@ -89,7 +89,7 @@ class TestHostname(t_help.FilesystemMockingTestCase):
ds = None
cc = cloud.Cloud(ds, paths, {}, distro, None)
self.patchUtils(self.tmp)
- cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, [])
contents = util.load_file("/etc/hostname")
self.assertEqual("blah", contents.strip())
@@ -103,7 +103,7 @@ class TestHostname(t_help.FilesystemMockingTestCase):
ds = None
cc = cloud.Cloud(ds, paths, {}, distro, None)
self.patchUtils(self.tmp)
- cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, [])
contents = util.load_file(distro.hostname_conf_fn)
self.assertEqual("blah", contents.strip())
@@ -126,7 +126,7 @@ class TestHostname(t_help.FilesystemMockingTestCase):
paths = helpers.Paths({"cloud_dir": self.tmp})
cc = cloud.Cloud(ds, paths, {}, distro, None)
for c in [cfg1, cfg2]:
- cc_set_hostname.handle("cc_set_hostname", c, cc, LOG, [])
+ cc_set_hostname.handle("cc_set_hostname", c, cc, [])
print("\n", m_subp.call_args_list)
if c["prefer_fqdn_over_hostname"]:
assert [
@@ -164,19 +164,19 @@ class TestHostname(t_help.FilesystemMockingTestCase):
cc = cloud.Cloud(ds, paths, {}, distro, None)
self.patchUtils(self.tmp)
cc_set_hostname.handle(
- "cc_set_hostname", {"hostname": "hostname1.me.com"}, cc, LOG, []
+ "cc_set_hostname", {"hostname": "hostname1.me.com"}, cc, []
)
contents = util.load_file("/etc/hostname")
self.assertEqual("hostname1", contents.strip())
cc_set_hostname.handle(
- "cc_set_hostname", {"hostname": "hostname1.me.com"}, cc, LOG, []
+ "cc_set_hostname", {"hostname": "hostname1.me.com"}, cc, []
)
self.assertIn(
"DEBUG: No hostname changes. Skipping set-hostname\n",
self.logs.getvalue(),
)
cc_set_hostname.handle(
- "cc_set_hostname", {"hostname": "hostname2.me.com"}, cc, LOG, []
+ "cc_set_hostname", {"hostname": "hostname2.me.com"}, cc, []
)
contents = util.load_file("/etc/hostname")
self.assertEqual("hostname2", contents.strip())
@@ -198,7 +198,7 @@ class TestHostname(t_help.FilesystemMockingTestCase):
self.patchUtils(self.tmp)
util.write_file("/etc/hostname", "")
- cc_set_hostname.handle("cc_set_hostname", {}, cc, LOG, [])
+ cc_set_hostname.handle("cc_set_hostname", {}, cc, [])
contents = util.load_file("/etc/hostname")
self.assertEqual("", contents.strip())
@@ -216,7 +216,7 @@ class TestHostname(t_help.FilesystemMockingTestCase):
# user-provided localhost should not be ignored
util.write_file("/etc/hostname", "")
cc_set_hostname.handle(
- "cc_set_hostname", {"hostname": "localhost"}, cc, LOG, []
+ "cc_set_hostname", {"hostname": "localhost"}, cc, []
)
contents = util.load_file("/etc/hostname")
self.assertEqual("localhost", contents.strip())
@@ -226,7 +226,7 @@ class TestHostname(t_help.FilesystemMockingTestCase):
distro = self._fetch_distro("debian")
def set_hostname_error(hostname, fqdn):
- raise Exception("OOPS on: %s" % fqdn)
+ raise RuntimeError("OOPS on: %s" % fqdn)
distro.set_hostname = set_hostname_error
paths = helpers.Paths({"cloud_dir": self.tmp})
@@ -235,7 +235,7 @@ class TestHostname(t_help.FilesystemMockingTestCase):
self.patchUtils(self.tmp)
with self.assertRaises(cc_set_hostname.SetHostnameError) as ctx_mgr:
cc_set_hostname.handle(
- "somename", {"hostname": "hostname1.me.com"}, cc, LOG, []
+ "somename", {"hostname": "hostname1.me.com"}, cc, []
)
self.assertEqual(
"Failed to set the hostname to hostname1.me.com (hostname1):"
@@ -255,7 +255,7 @@ class TestHostname(t_help.FilesystemMockingTestCase):
self.patchUtils(self.tmp)
prev_fn = Path(cc.get_cpath("data")) / "set-hostname"
prev_fn.touch()
- cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, [])
contents = util.load_file("/etc/hostname")
self.assertEqual("blah", contents.strip())
diff --git a/tests/unittests/config/test_cc_set_passwords.py b/tests/unittests/config/test_cc_set_passwords.py
index a375c00b..f6885b2b 100644
--- a/tests/unittests/config/test_cc_set_passwords.py
+++ b/tests/unittests/config/test_cc_set_passwords.py
@@ -118,7 +118,6 @@ def get_chpasswd_calls(cfg, cloud, log):
"IGNORED",
cfg=cfg,
cloud=cloud,
- log=log,
args=[],
)
assert chpasswd.call_count > 0
@@ -132,7 +131,7 @@ class TestSetPasswordsHandle:
def test_handle_on_empty_config(self, m_subp, caplog):
"""handle logs that no password has changed when config is empty."""
cloud = get_cloud()
- setpass.handle("IGNORED", cfg={}, cloud=cloud, log=LOG, args=[])
+ setpass.handle("IGNORED", cfg={}, cloud=cloud, args=[])
assert (
"Leaving SSH config 'PasswordAuthentication' unchanged. "
"ssh_pwauth=None"
@@ -155,7 +154,7 @@ class TestSetPasswordsHandle:
]
cfg = {"chpasswd": {"list": valid_hashed_pwds}}
with mock.patch.object(setpass.Distro, "chpasswd") as chpasswd:
- setpass.handle("IGNORED", cfg=cfg, cloud=cloud, log=LOG, args=[])
+ setpass.handle("IGNORED", cfg=cfg, cloud=cloud, args=[])
assert "Handling input for chpasswd as list." in caplog.text
assert "Setting hashed password for ['root', 'ubuntu']" in caplog.text
@@ -181,7 +180,7 @@ class TestSetPasswordsHandle:
]
cfg = {"chpasswd": {"users": valid_hashed_pwds}}
with mock.patch.object(setpass.Distro, "chpasswd") as chpasswd:
- setpass.handle("IGNORED", cfg=cfg, cloud=cloud, log=LOG, args=[])
+ setpass.handle("IGNORED", cfg=cfg, cloud=cloud, args=[])
assert "Handling input for chpasswd as list." not in caplog.text
assert "Setting hashed password for ['root', 'ubuntu']" in caplog.text
first_arg = chpasswd.call_args[0]
@@ -228,7 +227,7 @@ class TestSetPasswordsHandle:
with mock.patch.object(
cloud.distro, "uses_systemd", return_value=False
):
- setpass.handle("IGNORED", cfg=cfg, cloud=cloud, log=LOG, args=[])
+ setpass.handle("IGNORED", cfg=cfg, cloud=cloud, args=[])
assert [
mock.call(
["pw", "usermod", "ubuntu", "-h", "0"],
@@ -272,7 +271,7 @@ class TestSetPasswordsHandle:
cfg = {"chpasswd": user_cfg}
with mock.patch.object(setpass.Distro, "chpasswd") as chpasswd:
- setpass.handle("IGNORED", cfg=cfg, cloud=cloud, log=LOG, args=[])
+ setpass.handle("IGNORED", cfg=cfg, cloud=cloud, args=[])
dbg_text = "Handling input for chpasswd as list."
if "list" in cfg["chpasswd"]:
assert dbg_text in caplog.text
@@ -511,7 +510,7 @@ class TestExpire:
mocker.patch.object(cloud.distro, "chpasswd")
m_expire = mocker.patch.object(cloud.distro, "expire_passwd")
- setpass.handle("IGNORED", cfg=cfg, cloud=cloud, log=LOG, args=[])
+ setpass.handle("IGNORED", cfg=cfg, cloud=cloud, args=[])
if bool(cfg["chpasswd"]["expire"]):
assert m_expire.call_args_list == [
@@ -537,7 +536,7 @@ class TestExpire:
mocker.patch.object(cloud.distro, "chpasswd")
m_expire = mocker.patch.object(cloud.distro, "expire_passwd")
- setpass.handle("IGNORED", cfg=cfg, cloud=cloud, log=LOG, args=[])
+ setpass.handle("IGNORED", cfg=cfg, cloud=cloud, args=[])
if bool(cfg["chpasswd"]["expire"]):
assert m_expire.call_args_list == [
diff --git a/tests/unittests/config/test_cc_snap.py b/tests/unittests/config/test_cc_snap.py
index 432f72ce..9ca0da37 100644
--- a/tests/unittests/config/test_cc_snap.py
+++ b/tests/unittests/config/test_cc_snap.py
@@ -329,7 +329,7 @@ class TestHandle:
cfg = {
"snap": {"assertions": [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]}
}
- handle("snap", cfg=cfg, cloud=fake_cloud, log=mock.Mock(), args=None)
+ handle("snap", cfg=cfg, cloud=fake_cloud, args=None)
content = "\n".join(cfg["snap"]["assertions"])
util.write_file(compare_file, content.encode("utf-8"))
assert util.load_file(compare_file) == util.load_file(assert_file)
diff --git a/tests/unittests/config/test_cc_ssh.py b/tests/unittests/config/test_cc_ssh.py
index 66368d0f..3fa9fcf8 100644
--- a/tests/unittests/config/test_cc_ssh.py
+++ b/tests/unittests/config/test_cc_ssh.py
@@ -114,7 +114,7 @@ class TestHandleSsh:
m_nug.return_value = ([], {})
cc_ssh.PUBLISH_HOST_KEYS = False
cloud = get_cloud(distro="ubuntu", metadata={"public-keys": keys})
- cc_ssh.handle("name", cfg, cloud, LOG, None)
+ cc_ssh.handle("name", cfg, cloud, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE")
options = options.replace("$DISABLE_USER", "root")
m_glob.assert_called_once_with("/etc/ssh/ssh_host_*key*")
@@ -145,7 +145,7 @@ class TestHandleSsh:
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
cloud = get_cloud(distro="ubuntu", metadata={"public-keys": keys})
- cc_ssh.handle("name", cfg, cloud, LOG, None)
+ cc_ssh.handle("name", cfg, cloud, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
options = options.replace("$DISABLE_USER", "root")
@@ -196,7 +196,7 @@ class TestHandleSsh:
cloud = get_cloud(distro="ubuntu", metadata={"public-keys": keys})
if mock_get_public_ssh_keys:
cloud.get_public_ssh_keys = mock.Mock(return_value=keys)
- cc_ssh.handle("name", cfg, cloud, LOG, None)
+ cc_ssh.handle("name", cfg, cloud, None)
if empty_opts:
options = ""
@@ -279,7 +279,7 @@ class TestHandleSsh:
]
)
]
- cc_ssh.handle("name", cfg, cloud, LOG, None)
+ cc_ssh.handle("name", cfg, cloud, None)
assert (
expected_calls == cloud.datasource.publish_host_keys.call_args_list
)
@@ -363,7 +363,7 @@ class TestHandleSsh:
with mock.patch(
MODPATH + "ssh_util.parse_ssh_config", return_value=[]
):
- cc_ssh.handle("name", cfg, get_cloud(distro="ubuntu"), LOG, None)
+ cc_ssh.handle("name", cfg, get_cloud(distro="ubuntu"), None)
# Check that all expected output has been done.
for call_ in expected_calls:
@@ -410,7 +410,7 @@ class TestHandleSsh:
with mock.patch(
MODPATH + "ssh_util.parse_ssh_config", return_value=[]
):
- cc_ssh.handle("name", cfg, get_cloud("ubuntu"), LOG, None)
+ cc_ssh.handle("name", cfg, get_cloud("ubuntu"), None)
assert [] == m_write_file.call_args_list
expected_log_msgs = [
f'Skipping {reason} ssh_keys entry: "{key_type}_private"',
diff --git a/tests/unittests/config/test_cc_ssh_import_id.py b/tests/unittests/config/test_cc_ssh_import_id.py
index ffeee92e..572094a4 100644
--- a/tests/unittests/config/test_cc_ssh_import_id.py
+++ b/tests/unittests/config/test_cc_ssh_import_id.py
@@ -74,5 +74,5 @@ class TestHandleSshImportIDs:
"""Skip config without ssh_import_id"""
m_which.return_value = None
cloud = get_cloud("ubuntu")
- cc_ssh_import_id.handle("name", cfg, cloud, LOG, [])
+ cc_ssh_import_id.handle("name", cfg, cloud, [])
assert log in caplog.text
diff --git a/tests/unittests/config/test_cc_timezone.py b/tests/unittests/config/test_cc_timezone.py
index f76397b7..b02c051c 100644
--- a/tests/unittests/config/test_cc_timezone.py
+++ b/tests/unittests/config/test_cc_timezone.py
@@ -40,7 +40,7 @@ class TestTimezone(t_help.FilesystemMockingTestCase):
"/usr/share/zoneinfo/%s" % cfg["timezone"], dummy_contents
)
- cc_timezone.handle("cc_timezone", cfg, cc, LOG, [])
+ cc_timezone.handle("cc_timezone", cfg, cc, [])
contents = util.load_file("/etc/sysconfig/clock", decode=False)
n_cfg = ConfigObj(BytesIO(contents))
diff --git a/tests/unittests/config/test_cc_ubuntu_advantage.py b/tests/unittests/config/test_cc_ubuntu_advantage.py
index 34038cca..dd12941d 100644
--- a/tests/unittests/config/test_cc_ubuntu_advantage.py
+++ b/tests/unittests/config/test_cc_ubuntu_advantage.py
@@ -804,7 +804,7 @@ class TestHandle:
caplog,
):
"""Non-Pro schemas and instance."""
- handle("nomatter", cfg=cfg, cloud=cloud, log=None, args=None)
+ handle("nomatter", cfg=cfg, cloud=cloud, args=None)
for record_tuple in log_record_tuples:
assert record_tuple in caplog.record_tuples
if maybe_install_call_args_list is not None:
@@ -936,7 +936,7 @@ class TestHandle:
m_auto_attach.side_effect = auto_attach_side_effect
with expectation:
- handle("nomatter", cfg=cfg, cloud=cloud, log=None, args=None)
+ handle("nomatter", cfg=cfg, cloud=cloud, args=None)
for record_tuple in log_record_tuples:
assert record_tuple in caplog.record_tuples
@@ -981,7 +981,7 @@ class TestHandle:
enable or disable ua auto-attach.
"""
m_should_auto_attach.return_value = is_pro
- handle("nomatter", cfg=cfg, cloud=self.cloud, log=None, args=None)
+ handle("nomatter", cfg=cfg, cloud=self.cloud, args=None)
assert not m_attach.call_args_list
@pytest.mark.parametrize(
@@ -1012,7 +1012,7 @@ class TestHandle:
self, m_configure_ua, cfg, handle_kwargs, match
):
with pytest.raises(RuntimeError, match=match):
- handle("nomatter", cfg=cfg, log=mock.Mock(), **handle_kwargs)
+ handle("nomatter", cfg=cfg, **handle_kwargs)
assert 0 == m_configure_ua.call_count
@pytest.mark.parametrize(
@@ -1035,7 +1035,6 @@ class TestHandle:
handle(
"nomatter",
cfg=cfg,
- log=mock.Mock(),
cloud=self.cloud,
args=None,
)
@@ -1062,7 +1061,6 @@ class TestHandle:
handle(
"nomatter",
cfg=cfg,
- log=mock.Mock(),
cloud=self.cloud,
args=None,
)
@@ -1084,7 +1082,6 @@ class TestHandle:
handle(
"nomatter",
cfg=cfg,
- log=mock.Mock(),
cloud=self.cloud,
args=None,
)
@@ -1134,9 +1131,13 @@ class TestShouldAutoAttach:
)
if expected_result is None: # UA API does respond
assert should_auto_attach_value == _should_auto_attach(ua_section)
+ assert (
+ "Checking if the instance can be attached to Ubuntu Pro took"
+ in caplog.text
+ )
else: # cloud-init does respond
assert expected_result == _should_auto_attach(ua_section)
- assert not caplog.text
+ assert not caplog.text
class TestAutoAttach:
@@ -1165,7 +1166,7 @@ class TestAutoAttach:
"uaclient.api.u.pro.attach.auto.full_auto_attach.v1"
] = mock.Mock()
_auto_attach(self.ua_section)
- assert not caplog.text
+ assert "Attaching to Ubuntu Pro took" in caplog.text
class TestAttach:
diff --git a/tests/unittests/config/test_cc_ubuntu_autoinstall.py b/tests/unittests/config/test_cc_ubuntu_autoinstall.py
index 87f44f82..b130ecb6 100644
--- a/tests/unittests/config/test_cc_ubuntu_autoinstall.py
+++ b/tests/unittests/config/test_cc_ubuntu_autoinstall.py
@@ -116,7 +116,7 @@ class TestHandleAutoinstall:
):
subp.return_value = snap_list, ""
cloud = get_cloud(distro="ubuntu")
- cc_ubuntu_autoinstall.handle("name", cfg, cloud, LOG, None)
+ cc_ubuntu_autoinstall.handle("name", cfg, cloud, None)
assert subp_calls == subp.call_args_list
for log in logs:
assert log in caplog.text
diff --git a/tests/unittests/config/test_cc_ubuntu_drivers.py b/tests/unittests/config/test_cc_ubuntu_drivers.py
index 6fbc47bd..822c79ce 100644
--- a/tests/unittests/config/test_cc_ubuntu_drivers.py
+++ b/tests/unittests/config/test_cc_ubuntu_drivers.py
@@ -88,7 +88,7 @@ class TestUbuntuDrivers:
debconf_file = tdir.join("nvidia.template")
m_tmp.return_value = tdir
myCloud = mock.MagicMock()
- drivers.handle("ubuntu_drivers", new_config, myCloud, None, None)
+ drivers.handle("ubuntu_drivers", new_config, myCloud, None)
assert [
mock.call(drivers.X_LOADTEMPLATEFILE, debconf_file)
] == m_debconf.DebconfCommunicator().__enter__().command.call_args_list
@@ -122,7 +122,7 @@ class TestUbuntuDrivers:
)
with pytest.raises(Exception):
- drivers.handle("ubuntu_drivers", cfg_accepted, myCloud, None, None)
+ drivers.handle("ubuntu_drivers", cfg_accepted, myCloud, None)
assert [
mock.call(drivers.X_LOADTEMPLATEFILE, debconf_file)
] == m_debconf.DebconfCommunicator().__enter__().command.call_args_list
@@ -185,19 +185,19 @@ class TestUbuntuDrivers:
):
"""Helper to reduce repetition when testing negative cases"""
myCloud = mock.MagicMock()
- drivers.handle("ubuntu_drivers", config, myCloud, None, None)
+ drivers.handle("ubuntu_drivers", config, myCloud, None)
assert 0 == myCloud.distro.install_packages.call_count
assert 0 == m_subp.call_count
@mock.patch(MPATH + "install_drivers")
+ @mock.patch(MPATH + "LOG")
def test_handle_no_drivers_does_nothing(
- self, m_install_drivers, m_debconf, cfg_accepted, install_gpgpu
+ self, m_log, m_install_drivers, m_debconf, cfg_accepted, install_gpgpu
):
"""If no 'drivers' key in the config, nothing should be done."""
myCloud = mock.MagicMock()
- myLog = mock.MagicMock()
- drivers.handle("ubuntu_drivers", {"foo": "bzr"}, myCloud, myLog, None)
- assert "Skipping module named" in myLog.debug.call_args_list[0][0][0]
+ drivers.handle("ubuntu_drivers", {"foo": "bzr"}, myCloud, None)
+ assert "Skipping module named" in m_log.debug.call_args_list[0][0][0]
assert 0 == m_install_drivers.call_count
@mock.patch(M_TMP_PATH)
@@ -267,7 +267,7 @@ class TestUbuntuDrivers:
)
with pytest.raises(Exception):
- drivers.handle("ubuntu_drivers", cfg_accepted, myCloud, None, None)
+ drivers.handle("ubuntu_drivers", cfg_accepted, myCloud, None)
assert [
mock.call(drivers.X_LOADTEMPLATEFILE, debconf_file)
] == m_debconf.DebconfCommunicator().__enter__().command.call_args_list
@@ -304,9 +304,7 @@ class TestUbuntuDrivers:
"drivers": {"nvidia": {"license-accepted": True, "version": None}}
}
with pytest.raises(AttributeError):
- drivers.handle(
- "ubuntu_drivers", version_none_cfg, myCloud, None, None
- )
+ drivers.handle("ubuntu_drivers", version_none_cfg, myCloud, None)
assert (
0 == m_debconf.DebconfCommunicator.__enter__().command.call_count
)
@@ -335,7 +333,7 @@ class TestUbuntuDriversWithVersion:
version_none_cfg = {
"drivers": {"nvidia": {"license-accepted": True, "version": None}}
}
- drivers.handle("ubuntu_drivers", version_none_cfg, myCloud, None, None)
+ drivers.handle("ubuntu_drivers", version_none_cfg, myCloud, None)
assert [
mock.call(drivers.X_LOADTEMPLATEFILE, debconf_file)
] == m_debconf.DebconfCommunicator().__enter__().command.call_args_list
@@ -349,20 +347,19 @@ class TestUbuntuDriversNotRun:
@mock.patch(MPATH + "HAS_DEBCONF", True)
@mock.patch(M_TMP_PATH)
@mock.patch(MPATH + "install_drivers")
+ @mock.patch(MPATH + "LOG")
def test_no_cfg_drivers_does_nothing(
self,
+ m_log,
m_install_drivers,
m_tmp,
m_debconf,
tmpdir,
):
m_tmp.return_value = tmpdir
- m_log = mock.MagicMock()
myCloud = mock.MagicMock()
version_none_cfg = {}
- drivers.handle(
- "ubuntu_drivers", version_none_cfg, myCloud, m_log, None
- )
+ drivers.handle("ubuntu_drivers", version_none_cfg, myCloud, None)
assert 0 == m_install_drivers.call_count
assert (
mock.call(
@@ -375,20 +372,19 @@ class TestUbuntuDriversNotRun:
@mock.patch(MPATH + "HAS_DEBCONF", False)
@mock.patch(M_TMP_PATH)
@mock.patch(MPATH + "install_drivers")
+ @mock.patch(MPATH + "LOG")
def test_has_not_debconf_does_nothing(
self,
+ m_log,
m_install_drivers,
m_tmp,
m_debconf,
tmpdir,
):
m_tmp.return_value = tmpdir
- m_log = mock.MagicMock()
myCloud = mock.MagicMock()
version_none_cfg = {"drivers": {"nvidia": {"license-accepted": True}}}
- drivers.handle(
- "ubuntu_drivers", version_none_cfg, myCloud, m_log, None
- )
+ drivers.handle("ubuntu_drivers", version_none_cfg, myCloud, None)
assert 0 == m_install_drivers.call_count
assert (
mock.call(
diff --git a/tests/unittests/config/test_cc_update_etc_hosts.py b/tests/unittests/config/test_cc_update_etc_hosts.py
index 6ee6f197..244ac170 100644
--- a/tests/unittests/config/test_cc_update_etc_hosts.py
+++ b/tests/unittests/config/test_cc_update_etc_hosts.py
@@ -44,7 +44,7 @@ class TestHostsFile(t_help.FilesystemMockingTestCase):
ds = None
cc = cloud.Cloud(ds, paths, {}, distro, None)
self.patchUtils(self.tmp)
- cc_update_etc_hosts.handle("test", cfg, cc, LOG, [])
+ cc_update_etc_hosts.handle("test", cfg, cc, [])
contents = util.load_file("%s/etc/hosts" % self.tmp)
if "127.0.1.1\tcloud-init.test.us\tcloud-init" not in contents:
self.assertIsNone("No entry for 127.0.1.1 in etc/hosts")
@@ -67,7 +67,7 @@ class TestHostsFile(t_help.FilesystemMockingTestCase):
ds = None
cc = cloud.Cloud(ds, paths, {}, distro, None)
self.patchUtils(self.tmp)
- cc_update_etc_hosts.handle("test", cfg, cc, LOG, [])
+ cc_update_etc_hosts.handle("test", cfg, cc, [])
contents = util.load_file("%s/etc/hosts" % self.tmp)
if "127.0.1.1 cloud-init.test.us cloud-init" not in contents:
self.assertIsNone("No entry for 127.0.1.1 in etc/hosts")
diff --git a/tests/unittests/config/test_cc_users_groups.py b/tests/unittests/config/test_cc_users_groups.py
index 6a026a87..48f80ea8 100644
--- a/tests/unittests/config/test_cc_users_groups.py
+++ b/tests/unittests/config/test_cc_users_groups.py
@@ -42,7 +42,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
)
- cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ cc_users_groups.handle("modulename", cfg, cloud, None)
m_user.assert_not_called()
m_group.assert_not_called()
@@ -62,7 +62,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
)
- cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ cc_users_groups.handle("modulename", cfg, cloud, None)
self.assertCountEqual(
m_user.call_args_list,
[
@@ -107,7 +107,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro="freebsd", sys_cfg=sys_cfg, metadata=metadata
)
- cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ cc_users_groups.handle("modulename", cfg, cloud, None)
self.assertCountEqual(
m_fbsd_user.call_args_list,
[
@@ -142,7 +142,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
)
- cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ cc_users_groups.handle("modulename", cfg, cloud, None)
self.assertCountEqual(
m_user.call_args_list,
[
@@ -183,7 +183,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
)
- cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ cc_users_groups.handle("modulename", cfg, cloud, None)
self.assertCountEqual(
m_user.call_args_list,
[
@@ -216,7 +216,7 @@ class TestHandleUsersGroups(CiTestCase):
}
cloud = self.tmp_cloud(distro="ubuntu", sys_cfg={}, metadata={})
with self.assertRaises(ValueError) as context_manager:
- cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ cc_users_groups.handle("modulename", cfg, cloud, None)
m_group.assert_not_called()
self.assertEqual(
"Not creating user me2. Key(s) ssh_import_id cannot be provided"
@@ -246,7 +246,7 @@ class TestHandleUsersGroups(CiTestCase):
distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
)
with self.assertRaises(ValueError) as context_manager:
- cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ cc_users_groups.handle("modulename", cfg, cloud, None)
m_group.assert_not_called()
self.assertEqual(
"Not creating user me2. Invalid value of ssh_redirect_user:"
@@ -270,7 +270,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
)
- cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ cc_users_groups.handle("modulename", cfg, cloud, None)
self.assertCountEqual(
m_user.call_args_list,
[
@@ -296,7 +296,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
)
- cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ cc_users_groups.handle("modulename", cfg, cloud, None)
m_user.assert_called_once_with("me2", default=False)
m_group.assert_not_called()
self.assertEqual(
diff --git a/tests/unittests/config/test_cc_wireguard.py b/tests/unittests/config/test_cc_wireguard.py
index 6c91625b..a2890586 100644
--- a/tests/unittests/config/test_cc_wireguard.py
+++ b/tests/unittests/config/test_cc_wireguard.py
@@ -209,9 +209,7 @@ class TestWireGuard(CiTestCase):
def test_handle_no_config(self, m_maybe_install_wireguard_packages):
"""When no wireguard configuration is provided, nothing happens."""
cfg = {}
- cc_wireguard.handle(
- "wg", cfg=cfg, cloud=None, log=self.logger, args=None
- )
+ cc_wireguard.handle("wg", cfg=cfg, cloud=None, args=None)
self.assertIn(
"DEBUG: Skipping module named wg, no 'wireguard'"
" configuration found",
diff --git a/tests/unittests/config/test_cc_write_files.py b/tests/unittests/config/test_cc_write_files.py
index a9a40265..8facf155 100644
--- a/tests/unittests/config/test_cc_write_files.py
+++ b/tests/unittests/config/test_cc_write_files.py
@@ -160,7 +160,7 @@ class TestWriteFiles(FilesystemMockingTestCase):
]
}
cc = self.tmp_cloud("ubuntu")
- handle("ignored", cfg, cc, LOG, [])
+ handle("ignored", cfg, cc, [])
assert content == util.load_file(file_path)
self.assertNotIn(
"Unknown encoding type text/plain", self.logs.getvalue()
@@ -171,7 +171,7 @@ class TestWriteFiles(FilesystemMockingTestCase):
file_path = "/tmp/deferred.file"
config = {"write_files": [{"path": file_path, "defer": True}]}
cc = self.tmp_cloud("ubuntu")
- handle("cc_write_file", config, cc, LOG, [])
+ handle("cc_write_file", config, cc, [])
with self.assertRaises(FileNotFoundError):
util.load_file(file_path)
diff --git a/tests/unittests/config/test_cc_write_files_deferred.py b/tests/unittests/config/test_cc_write_files_deferred.py
index ed2056bb..f4126c42 100644
--- a/tests/unittests/config/test_cc_write_files_deferred.py
+++ b/tests/unittests/config/test_cc_write_files_deferred.py
@@ -44,7 +44,7 @@ class TestWriteFilesDeferred(FilesystemMockingTestCase):
]
}
cc = self.tmp_cloud("ubuntu")
- handle("cc_write_files_deferred", config, cc, LOG, [])
+ handle("cc_write_files_deferred", config, cc, [])
self.assertEqual(util.load_file("/tmp/deferred.file"), expected)
with self.assertRaises(FileNotFoundError):
util.load_file("/tmp/not_deferred.file")
diff --git a/tests/unittests/config/test_cc_yum_add_repo.py b/tests/unittests/config/test_cc_yum_add_repo.py
index 6edd21f4..6da44604 100644
--- a/tests/unittests/config/test_cc_yum_add_repo.py
+++ b/tests/unittests/config/test_cc_yum_add_repo.py
@@ -41,7 +41,7 @@ class TestConfig(helpers.FilesystemMockingTestCase):
},
}
self.patchUtils(self.tmp)
- cc_yum_add_repo.handle("yum_add_repo", cfg, None, LOG, [])
+ cc_yum_add_repo.handle("yum_add_repo", cfg, None, [])
self.assertRaises(
IOError, util.load_file, "/etc/yum.repos.d/epel_testing.repo"
)
@@ -61,7 +61,7 @@ class TestConfig(helpers.FilesystemMockingTestCase):
}
self.patchUtils(self.tmp)
self.patchOS(self.tmp)
- cc_yum_add_repo.handle("yum_add_repo", cfg, None, LOG, [])
+ cc_yum_add_repo.handle("yum_add_repo", cfg, None, [])
contents = util.load_file("/etc/yum.repos.d/epel-testing.repo")
parser = configparser.ConfigParser()
parser.read_string(contents)
@@ -101,7 +101,7 @@ class TestConfig(helpers.FilesystemMockingTestCase):
}
}
self.patchUtils(self.tmp)
- cc_yum_add_repo.handle("yum_add_repo", cfg, None, LOG, [])
+ cc_yum_add_repo.handle("yum_add_repo", cfg, None, [])
contents = util.load_file("/etc/yum.repos.d/puppetlabs-products.repo")
parser = configparser.ConfigParser()
parser.read_string(contents)
diff --git a/tests/unittests/config/test_cc_zypper_add_repo.py b/tests/unittests/config/test_cc_zypper_add_repo.py
index 4304fee1..12b0bc2a 100644
--- a/tests/unittests/config/test_cc_zypper_add_repo.py
+++ b/tests/unittests/config/test_cc_zypper_add_repo.py
@@ -162,7 +162,7 @@ class TestConfig(helpers.FilesystemMockingTestCase):
os.makedirs("%s/etc/zypp/repos.d" % root_d)
helpers.populate_dir(root_d, {self.zypp_conf: "# Zypp config\n"})
self.reRoot(root_d)
- cc_zypper_add_repo.handle("zypper_add_repo", cfg, None, LOG, [])
+ cc_zypper_add_repo.handle("zypper_add_repo", cfg, None, [])
cfg_out = os.path.join(root_d, self.zypp_conf)
contents = util.load_file(cfg_out)
expected = [
diff --git a/tests/unittests/config/test_modules.py b/tests/unittests/config/test_modules.py
index bc105064..15b84bfa 100644
--- a/tests/unittests/config/test_modules.py
+++ b/tests/unittests/config/test_modules.py
@@ -2,6 +2,7 @@
import importlib
+import inspect
import logging
from pathlib import Path
from typing import List
@@ -13,6 +14,7 @@ from cloudinit.config.modules import ModuleDetails, Modules, _is_active
from cloudinit.config.schema import MetaSchema
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import FREQUENCIES
+from cloudinit.stages import Init
from tests.unittests.helpers import cloud_init_project_dir, mock
M_PATH = "cloudinit.config.modules."
@@ -21,7 +23,7 @@ M_PATH = "cloudinit.config.modules."
def get_module_names() -> List[str]:
"""Return list of module names in cloudinit/config"""
files = list(
- Path(cloud_init_project_dir("cloudinit/config/")).glob("cc_*.py")
+ Path(cloud_init_project_dir("cloudinit/config/")).glob("cc_*.py"),
)
return [mod.stem for mod in files]
@@ -172,3 +174,49 @@ class TestModules:
mock.call([list(module_details)])
] == m_run_modules.call_args_list
assert "Skipping" not in caplog.text
+
+ @mock.patch(M_PATH + "signature")
+ @mock.patch("cloudinit.config.modules.ReportEventStack")
+ def test_old_handle(self, event, m_signature, caplog):
+ def handle(name, cfg, cloud, log, args):
+ pass
+
+ m_signature.return_value = inspect.signature(handle)
+ module = mock.Mock()
+ module.handle.side_effect = handle
+ mods = Modules(
+ init=mock.Mock(spec=Init),
+ cfg_files=mock.Mock(),
+ reporter=mock.Mock(),
+ )
+ mods._cached_cfg = {}
+ module_details = ModuleDetails(
+ module=module,
+ name="mod_name",
+ frequency=["always"],
+ run_args=[],
+ )
+ m_cc = mods.init.cloudify.return_value
+ m_cc.run.return_value = (1, "doesnotmatter")
+
+ mods._run_modules([module_details])
+
+ assert [
+ mock.call(
+ mock.ANY,
+ mock.ANY,
+ {
+ "name": "mod_name",
+ "cfg": {},
+ "cloud": mock.ANY,
+ "args": [],
+ "log": mock.ANY,
+ },
+ freq=["always"],
+ )
+ ] == m_cc.run.call_args_list
+
+ assert (
+ "Config modules with a `log` parameter is deprecated in 23.2"
+ in caplog.text
+ )
diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py
index d43af5cc..8276511d 100644
--- a/tests/unittests/config/test_schema.py
+++ b/tests/unittests/config/test_schema.py
@@ -19,7 +19,7 @@ from typing import List, Optional, Sequence, Set
import pytest
-from cloudinit import stages
+from cloudinit import log, stages
from cloudinit.config.schema import (
CLOUD_CONFIG_HEADER,
VERSIONED_USERDATA_SCHEMA_FILE,
@@ -55,6 +55,7 @@ from tests.unittests.helpers import (
from tests.unittests.util import FakeDataSource
M_PATH = "cloudinit.config.schema."
+DEPRECATED_LOG_LEVEL = 35
def get_schemas() -> dict:
@@ -645,6 +646,7 @@ class TestValidateCloudConfigSchema:
def test_validateconfig_logs_deprecations(
self, schema, config, expected_msg, log_deprecations, caplog
):
+ log.setupLogging()
validate_cloudconfig_schema(
config,
schema,
@@ -653,7 +655,7 @@ class TestValidateCloudConfigSchema:
)
if expected_msg is None:
return
- log_record = (M_PATH[:-1], logging.WARNING, expected_msg)
+ log_record = (M_PATH[:-1], DEPRECATED_LOG_LEVEL, expected_msg)
if log_deprecations:
assert log_record == caplog.record_tuples[-1]
else:
diff --git a/tests/unittests/distros/test__init__.py b/tests/unittests/distros/test__init__.py
index ea017d58..7c5187fd 100644
--- a/tests/unittests/distros/test__init__.py
+++ b/tests/unittests/distros/test__init__.py
@@ -221,102 +221,6 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
["pw", "usermod", "myuser", "-p", "01-Jan-1970"]
)
- @mock.patch("cloudinit.distros.uses_systemd")
- @mock.patch(
- "cloudinit.distros.subp.which",
- )
- @mock.patch(
- "cloudinit.distros.subp.subp",
- )
- def test_virtualization_detected(self, m_subp, m_which, m_uses_systemd):
- m_uses_systemd.return_value = True
- m_which.return_value = "/usr/bin/systemd-detect-virt"
- m_subp.return_value = ("kvm", None)
-
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- self.assertTrue(d.is_virtual)
-
- @mock.patch("cloudinit.distros.uses_systemd")
- @mock.patch(
- "cloudinit.distros.subp.subp",
- )
- def test_virtualization_not_detected(self, m_subp, m_uses_systemd):
- m_uses_systemd.return_value = True
- m_subp.return_value = ("none", None)
-
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- self.assertFalse(d.is_virtual)
-
- @mock.patch("cloudinit.distros.uses_systemd")
- def test_virtualization_unknown(self, m_uses_systemd):
- m_uses_systemd.return_value = True
-
- from cloudinit.subp import ProcessExecutionError
-
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- with mock.patch(
- "cloudinit.distros.subp.which",
- return_value=None,
- ):
- self.assertIsNone(
- d.is_virtual,
- "Reflect unknown state when detection"
- " binary cannot be found",
- )
-
- with mock.patch(
- "cloudinit.distros.subp.subp",
- side_effect=ProcessExecutionError(),
- ):
- self.assertIsNone(
- d.is_virtual, "Reflect unknown state on ProcessExecutionError"
- )
-
- def test_virtualization_on_freebsd(self):
- # This test function is a bit unusual:
- # We need to first mock away the `ifconfig -a` subp call
- # Then, we can use side-effects to get the results of two subp calls
- # needed for is_container()/virtual() which is_virtual depends on.
- # We also have to clear cache between each of those assertions.
-
- cls = distros.fetch("freebsd")
- with mock.patch(
- "cloudinit.distros.subp.subp", return_value=("", None)
- ):
- d = cls("freebsd", {}, None)
- # This mock is called by `sysctl -n security.jail.jailed`
- with mock.patch(
- "cloudinit.distros.subp.subp",
- side_effect=[("0\n", None), ("literaly any truthy value", None)],
- ):
- self.assertFalse(d.is_container())
- d.is_container.cache_clear()
- self.assertTrue(d.is_container())
- d.is_container.cache_clear()
-
- # This mock is called by `sysctl -n kern.vm_guest`
- with mock.patch(
- "cloudinit.distros.subp.subp",
- # fmt: off
- side_effect=[
- ("0\n", None), ("hv\n", None), # virtual
- ("0\n", None), ("none\n", None), # physical
- ("0\n", None), ("hv\n", None) # virtual
- ],
- # fmt: on
- ):
- self.assertEqual(d.virtual(), "microsoft")
- d.is_container.cache_clear()
- d.virtual.cache_clear()
- self.assertEqual(d.virtual(), "none")
- d.is_container.cache_clear()
- d.virtual.cache_clear()
-
- self.assertTrue(d.is_virtual)
-
class TestGetPackageMirrors:
def return_first(self, mlist):
diff --git a/tests/unittests/distros/test_create_users.py b/tests/unittests/distros/test_create_users.py
index edc152e1..2a3d85b8 100644
--- a/tests/unittests/distros/test_create_users.py
+++ b/tests/unittests/distros/test_create_users.py
@@ -2,7 +2,7 @@
import re
-from cloudinit import distros, ssh_util
+from cloudinit import distros, log, ssh_util
from tests.unittests.helpers import CiTestCase, mock
from tests.unittests.util import abstract_to_concrete
@@ -140,6 +140,7 @@ class TestCreateUser(CiTestCase):
self, m_is_group, m_subp, m_is_snappy
):
"""users.groups supports a dict value, but emit deprecation log."""
+ log.setupLogging()
user = "foouser"
self.dist.create_user(user, groups={"group1": None, "group2": None})
expected = [
@@ -150,10 +151,15 @@ class TestCreateUser(CiTestCase):
]
self.assertEqual(m_subp.call_args_list, expected)
self.assertIn(
- "WARNING: DEPRECATED: The user foouser has a 'groups' config"
- " value of type dict which is deprecated and will be removed in a"
- " future version of cloud-init. Use a comma-delimited string or"
- " array instead: group1,group2.",
+ "DEPRECAT",
+ self.logs.getvalue(),
+ )
+ self.assertIn(
+ "The user foouser has a 'groups' config value of type dict",
+ self.logs.getvalue(),
+ )
+ self.assertIn(
+ "Use a comma-delimited",
self.logs.getvalue(),
)
@@ -182,9 +188,9 @@ class TestCreateUser(CiTestCase):
],
)
self.assertIn(
- "WARNING: DEPRECATED: The user foouser has a 'sudo' config value"
- " of 'false' which will be dropped after April 2027. Use 'null'"
- " instead.",
+ "DEPRECATED: The value of 'false' in user foouser's 'sudo' "
+ "config is deprecated in 22.3 and scheduled to be removed"
+ " in 27.3. Use 'null' instead.",
self.logs.getvalue(),
)
diff --git a/tests/unittests/distros/test_netconfig.py b/tests/unittests/distros/test_netconfig.py
index e9fb0591..b1c89ce3 100644
--- a/tests/unittests/distros/test_netconfig.py
+++ b/tests/unittests/distros/test_netconfig.py
@@ -458,8 +458,16 @@ class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase):
def eni_path(self):
return "/etc/network/interfaces.d/50-cloud-init.cfg"
+ def rules_path(self):
+ return "/etc/udev/rules.d/70-persistent-net.rules"
+
def _apply_and_verify_eni(
- self, apply_fn, config, expected_cfgs=None, bringup=False
+ self,
+ apply_fn,
+ config,
+ expected_cfgs=None,
+ bringup=False,
+ previous_files=(),
):
if not expected_cfgs:
raise ValueError("expected_cfg must not be None")
@@ -467,7 +475,11 @@ class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase):
tmpd = None
with mock.patch("cloudinit.net.eni.available") as m_avail:
m_avail.return_value = True
+ path_modes = {}
with self.reRooted(tmpd) as tmpd:
+ for previous_path, content, mode in previous_files:
+ util.write_file(previous_path, content, mode=mode)
+ path_modes[previous_path] = mode
apply_fn(config, bringup)
results = dir2dict(tmpd)
@@ -478,7 +490,9 @@ class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase):
print(results[cfgpath])
print("----------")
self.assertEqual(expected, results[cfgpath])
- self.assertEqual(0o644, get_mode(cfgpath, tmpd))
+ self.assertEqual(
+ path_modes.get(cfgpath, 0o644), get_mode(cfgpath, tmpd)
+ )
def test_apply_network_config_and_bringup_filters_priority_eni_ub(self):
"""Network activator search priority can be overridden from config."""
@@ -527,11 +541,13 @@ class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase):
def test_apply_network_config_eni_ub(self):
expected_cfgs = {
self.eni_path(): V1_NET_CFG_OUTPUT,
+ self.rules_path(): "",
}
self._apply_and_verify_eni(
self.distro.apply_network_config,
V1_NET_CFG,
expected_cfgs=expected_cfgs.copy(),
+ previous_files=((self.rules_path(), "something", 0o660),),
)
def test_apply_network_config_ipv6_ub(self):
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index fec63809..32503fb8 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -173,7 +173,7 @@ class CiTestCase(TestCase):
)
if pass_through:
return _real_subp(*args, **kwargs)
- raise Exception(
+ raise RuntimeError(
"called subp. set self.allowed_subp=True to allow\n subp(%s)"
% ", ".join(
[str(repr(a)) for a in args]
diff --git a/tests/unittests/net/test_dhcp.py b/tests/unittests/net/test_dhcp.py
index 40340553..2b680e1f 100644
--- a/tests/unittests/net/test_dhcp.py
+++ b/tests/unittests/net/test_dhcp.py
@@ -221,7 +221,6 @@ class TestDHCPRFC3442(CiTestCase):
class TestDHCPParseStaticRoutes(CiTestCase):
-
with_logs = True
def parse_static_routes_empty_string(self):
@@ -336,6 +335,7 @@ class TestDHCPParseStaticRoutes(CiTestCase):
class TestDHCPDiscoveryClean(CiTestCase):
with_logs = True
+ ib_address_prefix = "00:00:00:00:00:00:00:00:00:00:00:00"
@mock.patch("cloudinit.net.dhcp.find_fallback_nic")
def test_no_fallback_nic_found(self, m_fallback_nic):
@@ -450,12 +450,21 @@ class TestDHCPDiscoveryClean(CiTestCase):
)
m_kill.assert_not_called()
+ @mock.patch("cloudinit.net.dhcp.is_ib_interface", return_value=False)
@mock.patch("cloudinit.net.dhcp.os.remove")
@mock.patch("cloudinit.net.dhcp.util.get_proc_ppid")
@mock.patch("cloudinit.net.dhcp.os.kill")
@mock.patch("cloudinit.net.dhcp.subp.subp")
@mock.patch("cloudinit.util.wait_for_files", return_value=False)
- def test_dhcp_discovery(self, m_wait, m_subp, m_kill, m_getppid, m_remove):
+ def test_dhcp_discovery(
+ self,
+ m_wait,
+ m_subp,
+ m_kill,
+ m_getppid,
+ m_remove,
+ mocked_is_ib_interface,
+ ):
"""dhcp_discovery brings up the interface and runs dhclient.
It also returns the parsed dhcp.leases file.
@@ -512,6 +521,95 @@ class TestDHCPDiscoveryClean(CiTestCase):
]
)
m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
+ mocked_is_ib_interface.assert_called_once_with("eth9")
+
+ @mock.patch("cloudinit.temp_utils.get_tmp_ancestor", return_value="/tmp")
+ @mock.patch("cloudinit.util.write_file")
+ @mock.patch(
+ "cloudinit.net.dhcp.get_interface_mac",
+ return_value="%s:AA:AA:AA:00:00:AA:AA:AA" % ib_address_prefix,
+ )
+ @mock.patch("cloudinit.net.dhcp.is_ib_interface", return_value=True)
+ @mock.patch("cloudinit.net.dhcp.os.remove")
+ @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid", return_value=1)
+ @mock.patch("cloudinit.net.dhcp.os.kill")
+ @mock.patch("cloudinit.net.dhcp.subp.subp", return_value=("", ""))
+ @mock.patch("cloudinit.util.wait_for_files", return_value=False)
+ def test_dhcp_discovery_ib(
+ self,
+ m_wait,
+ m_subp,
+ m_kill,
+ m_getppid,
+ m_remove,
+ mocked_is_ib_interface,
+ get_interface_mac,
+ mocked_write_file,
+ mocked_get_tmp_ancestor,
+ ):
+ """dhcp_discovery brings up the interface and runs dhclient.
+
+ It also returns the parsed dhcp.leases file.
+ """
+ lease_content = dedent(
+ """
+ lease {
+ interface "ib0";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """
+ )
+ my_pid = 1
+ with mock.patch(
+ "cloudinit.util.load_file", side_effect=["1", lease_content]
+ ):
+ self.assertCountEqual(
+ [
+ {
+ "interface": "ib0",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ }
+ ],
+ dhcp_discovery("/sbin/dhclient", "ib0"),
+ )
+ # Interface was brought up before dhclient called
+ m_subp.assert_has_calls(
+ [
+ mock.call(
+ ["ip", "link", "set", "dev", "ib0", "up"], capture=True
+ ),
+ mock.call(
+ [
+ DHCLIENT,
+ "-1",
+ "-v",
+ "-lf",
+ LEASE_F,
+ "-pf",
+ PID_F,
+ "ib0",
+ "-sf",
+ "/bin/true",
+ "-cf",
+ "/tmp/ib0-dhclient.conf",
+ ],
+ capture=True,
+ ),
+ ]
+ )
+ m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
+ mocked_is_ib_interface.assert_called_once_with("ib0")
+ get_interface_mac.assert_called_once_with("ib0")
+ mocked_get_tmp_ancestor.assert_called_once_with(needs_exe=True)
+ mocked_write_file.assert_called_once_with(
+ "/tmp/ib0-dhclient.conf",
+ 'interface "ib0" {send dhcp-client-identifier '
+ "20:AA:AA:AA:00:00:AA:AA:AA;}",
+ )
@mock.patch("cloudinit.net.dhcp.os.remove")
@mock.patch("cloudinit.net.dhcp.util.get_proc_ppid")
@@ -552,7 +650,6 @@ class TestDHCPDiscoveryClean(CiTestCase):
class TestSystemdParseLeases(CiTestCase):
-
lxd_lease = dedent(
"""\
# This is private data. Do not parse.
diff --git a/tests/unittests/net/test_ephemeral.py b/tests/unittests/net/test_ephemeral.py
index 0cefd04a..d2237faf 100644
--- a/tests/unittests/net/test_ephemeral.py
+++ b/tests/unittests/net/test_ephemeral.py
@@ -22,22 +22,16 @@ class TestEphemeralIPNetwork:
m_exit_stack,
ipv4,
ipv6,
- tmpdir,
):
interface = object()
- tmp_dir = str(tmpdir)
- with EphemeralIPNetwork(
- interface, ipv4=ipv4, ipv6=ipv6, tmp_dir=tmp_dir
- ):
+ with EphemeralIPNetwork(interface, ipv4=ipv4, ipv6=ipv6):
pass
expected_call_args_list = []
if ipv4:
expected_call_args_list.append(
mock.call(m_ephemeral_dhcp_v4.return_value)
)
- assert [
- mock.call(interface, tmp_dir=tmp_dir)
- ] == m_ephemeral_dhcp_v4.call_args_list
+ assert [mock.call(interface)] == m_ephemeral_dhcp_v4.call_args_list
else:
assert [] == m_ephemeral_dhcp_v4.call_args_list
if ipv6:
diff --git a/tests/unittests/net/test_network_state.py b/tests/unittests/net/test_network_state.py
index 57a4436f..0e1b27b4 100644
--- a/tests/unittests/net/test_network_state.py
+++ b/tests/unittests/net/test_network_state.py
@@ -4,7 +4,7 @@ from unittest import mock
import pytest
-from cloudinit import safeyaml
+from cloudinit import log, safeyaml, util
from cloudinit.net import network_state
from cloudinit.net.netplan import Renderer as NetplanRenderer
from cloudinit.net.renderers import NAME_TO_RENDERER
@@ -214,6 +214,9 @@ class TestNetworkStateParseConfigV2:
In netplan targets we perform a passthrough and the warning is not
needed.
"""
+ log.setupLogging()
+
+ util.deprecate._log = set() # type: ignore
ncfg = safeyaml.load(
cfg.format(
gateway4="gateway4: 10.54.0.1",
@@ -233,7 +236,7 @@ class TestNetworkStateParseConfigV2:
else:
count = 0 # No deprecation as we passthrough
assert count == caplog.text.count(
- "DEPRECATED: The use of `gateway4` and `gateway6` is"
+ "The use of `gateway4` and `gateway6`"
)
diff --git a/tests/unittests/sources/azure/test_imds.py b/tests/unittests/sources/azure/test_imds.py
index b5a72645..03f66502 100644
--- a/tests/unittests/sources/azure/test_imds.py
+++ b/tests/unittests/sources/azure/test_imds.py
@@ -3,20 +3,30 @@
import json
import logging
import math
+import re
from unittest import mock
import pytest
import requests
from cloudinit.sources.azure import imds
-from cloudinit.url_helper import UrlError
+from cloudinit.url_helper import UrlError, readurl
-MOCKPATH = "cloudinit.sources.azure.imds."
+LOG_PATH = "cloudinit.sources.azure.imds"
+MOCK_PATH = "cloudinit.sources.azure.imds."
+
+
+class StringMatch:
+ def __init__(self, regex) -> None:
+ self.regex = regex
+
+ def __eq__(self, other) -> bool:
+ return bool(re.match("^" + self.regex + "$", other))
@pytest.fixture
-def mock_readurl():
- with mock.patch(MOCKPATH + "readurl", autospec=True) as m:
+def wrapped_readurl():
+ with mock.patch.object(imds, "readurl", wraps=readurl) as m:
yield m
@@ -56,54 +66,63 @@ class TestFetchMetadataWithApiFallback:
def test_basic(
self,
caplog,
- mock_readurl,
+ mock_requests_session_request,
+ wrapped_readurl,
):
fake_md = {"foo": {"bar": []}}
- mock_readurl.side_effect = [
- mock.Mock(contents=json.dumps(fake_md).encode()),
+ mock_requests_session_request.side_effect = [
+ mock.Mock(content=json.dumps(fake_md)),
]
md = imds.fetch_metadata_with_api_fallback()
assert md == fake_md
- assert mock_readurl.mock_calls == [
+ assert wrapped_readurl.mock_calls == [
mock.call(
self.default_url,
timeout=self.timeout,
headers=self.headers,
retries=self.retries,
- exception_cb=imds._readurl_exception_callback,
+ exception_cb=mock.ANY,
infinite=False,
log_req_resp=True,
- ),
+ )
]
-
- warnings = [
- x.message for x in caplog.records if x.levelno == logging.WARNING
+ assert caplog.record_tuples == [
+ (
+ "cloudinit.url_helper",
+ logging.DEBUG,
+ StringMatch(r"\[0/11\] open.*"),
+ ),
+ (
+ "cloudinit.url_helper",
+ logging.DEBUG,
+ StringMatch("Read from.*"),
+ ),
]
- assert warnings == []
def test_basic_fallback(
self,
caplog,
- mock_readurl,
+ mock_requests_session_request,
+ wrapped_readurl,
):
fake_md = {"foo": {"bar": []}}
- mock_readurl.side_effect = [
+ mock_requests_session_request.side_effect = [
UrlError("No IMDS version", code=400),
- mock.Mock(contents=json.dumps(fake_md).encode()),
+ mock.Mock(content=json.dumps(fake_md)),
]
md = imds.fetch_metadata_with_api_fallback()
assert md == fake_md
- assert mock_readurl.mock_calls == [
+ assert wrapped_readurl.mock_calls == [
mock.call(
self.default_url,
timeout=self.timeout,
headers=self.headers,
retries=self.retries,
- exception_cb=imds._readurl_exception_callback,
+ exception_cb=mock.ANY,
infinite=False,
log_req_resp=True,
),
@@ -112,18 +131,38 @@ class TestFetchMetadataWithApiFallback:
timeout=self.timeout,
headers=self.headers,
retries=self.retries,
- exception_cb=imds._readurl_exception_callback,
+ exception_cb=mock.ANY,
infinite=False,
log_req_resp=True,
),
]
- warnings = [
- x.message for x in caplog.records if x.levelno == logging.WARNING
- ]
- assert warnings == [
- "Failed to fetch metadata from IMDS: No IMDS version",
- "Falling back to IMDS api-version: 2019-06-01",
+ assert caplog.record_tuples == [
+ (
+ "cloudinit.url_helper",
+ logging.DEBUG,
+ StringMatch(r"\[0/11\] open.*"),
+ ),
+ (
+ LOG_PATH,
+ logging.WARNING,
+ "Failed to fetch metadata from IMDS: No IMDS version",
+ ),
+ (
+ LOG_PATH,
+ logging.WARNING,
+ "Falling back to IMDS api-version: 2019-06-01",
+ ),
+ (
+ "cloudinit.url_helper",
+ logging.DEBUG,
+ StringMatch(r"\[0/11\] open.*"),
+ ),
+ (
+ "cloudinit.url_helper",
+ logging.DEBUG,
+ StringMatch("Read from.*"),
+ ),
]
@pytest.mark.parametrize(
@@ -155,11 +194,36 @@ class TestFetchMetadataWithApiFallback:
assert md == fake_md
assert len(mock_requests_session_request.mock_calls) == 2
assert mock_url_helper_time_sleep.mock_calls == [mock.call(1)]
-
- warnings = [
- x.message for x in caplog.records if x.levelno == logging.WARNING
+ assert caplog.record_tuples == [
+ (
+ "cloudinit.url_helper",
+ logging.DEBUG,
+ StringMatch(r"\[0/11\] open.*"),
+ ),
+ (
+ LOG_PATH,
+ logging.INFO,
+ StringMatch(
+ "Polling IMDS failed attempt 1 with exception:"
+ f".*{error!s}.*"
+ ),
+ ),
+ (
+ "cloudinit.url_helper",
+ logging.DEBUG,
+ StringMatch("Please wait 1 second.*"),
+ ),
+ (
+ "cloudinit.url_helper",
+ logging.DEBUG,
+ StringMatch(r"\[1/11\] open.*"),
+ ),
+ (
+ "cloudinit.url_helper",
+ logging.DEBUG,
+ StringMatch("Read from.*"),
+ ),
]
- assert warnings == []
def test_will_retry_errors_on_fallback(
self,
@@ -180,13 +244,58 @@ class TestFetchMetadataWithApiFallback:
assert md == fake_md
assert len(mock_requests_session_request.mock_calls) == 3
assert mock_url_helper_time_sleep.mock_calls == [mock.call(1)]
-
- warnings = [
- x.message for x in caplog.records if x.levelno == logging.WARNING
- ]
- assert warnings == [
- "Failed to fetch metadata from IMDS: fake error",
- "Falling back to IMDS api-version: 2019-06-01",
+ assert caplog.record_tuples == [
+ (
+ "cloudinit.url_helper",
+ logging.DEBUG,
+ StringMatch(r"\[0/11\] open.*"),
+ ),
+ (
+ LOG_PATH,
+ logging.INFO,
+ StringMatch(
+ "Polling IMDS failed attempt 1 with exception:"
+ f".*{error!s}.*"
+ ),
+ ),
+ (
+ LOG_PATH,
+ logging.WARNING,
+ "Failed to fetch metadata from IMDS: fake error",
+ ),
+ (
+ LOG_PATH,
+ logging.WARNING,
+ "Falling back to IMDS api-version: 2019-06-01",
+ ),
+ (
+ "cloudinit.url_helper",
+ logging.DEBUG,
+ StringMatch(r"\[0/11\] open.*"),
+ ),
+ (
+ LOG_PATH,
+ logging.INFO,
+ StringMatch(
+ "Polling IMDS failed attempt 1 with exception:"
+ f".*{error!s}.*"
+ ),
+ ),
+ (
+ "cloudinit.url_helper",
+ logging.DEBUG,
+ StringMatch("Please wait 1 second.*"),
+ ),
+ (
+ "cloudinit.url_helper",
+ logging.DEBUG,
+ StringMatch(r"\[1/11\] open.*"),
+ ),
+ (
+ "cloudinit.url_helper",
+ logging.DEBUG,
+ StringMatch("Read from.*"),
+ ),
]
@pytest.mark.parametrize(
@@ -221,10 +330,24 @@ class TestFetchMetadataWithApiFallback:
== [mock.call(1)] * self.retries
)
- warnings = [
- x.message for x in caplog.records if x.levelno == logging.WARNING
+ logs = [x for x in caplog.record_tuples if x[0] == LOG_PATH]
+ assert logs == [
+ (
+ LOG_PATH,
+ logging.INFO,
+ StringMatch(
+ f"Polling IMDS failed attempt {i} with exception:"
+ f".*{error!s}.*"
+ ),
+ )
+ for i in range(1, 12)
+ ] + [
+ (
+ LOG_PATH,
+ logging.WARNING,
+ f"Failed to fetch metadata from IMDS: {error!s}",
+ )
]
- assert warnings == [f"Failed to fetch metadata from IMDS: {error!s}"]
@pytest.mark.parametrize(
"error",
@@ -253,30 +376,47 @@ class TestFetchMetadataWithApiFallback:
assert len(mock_requests_session_request.mock_calls) == 1
assert mock_url_helper_time_sleep.mock_calls == []
- warnings = [
- x.message for x in caplog.records if x.levelno == logging.WARNING
+ assert caplog.record_tuples == [
+ (
+ "cloudinit.url_helper",
+ logging.DEBUG,
+ StringMatch(r"\[0/11\] open.*"),
+ ),
+ (
+ LOG_PATH,
+ logging.INFO,
+ StringMatch(
+ "Polling IMDS failed attempt 1 with exception:"
+ f".*{error!s}.*"
+ ),
+ ),
+ (
+ LOG_PATH,
+ logging.WARNING,
+ f"Failed to fetch metadata from IMDS: {error!s}",
+ ),
]
- assert warnings == [f"Failed to fetch metadata from IMDS: {error!s}"]
def test_non_json_repsonse(
self,
caplog,
- mock_readurl,
+ mock_requests_session_request,
+ wrapped_readurl,
):
- mock_readurl.side_effect = [
- mock.Mock(contents=b"bad data"),
+ mock_requests_session_request.side_effect = [
+ mock.Mock(content=b"bad data")
]
with pytest.raises(ValueError):
imds.fetch_metadata_with_api_fallback()
- assert mock_readurl.mock_calls == [
+ assert wrapped_readurl.mock_calls == [
mock.call(
self.default_url,
timeout=self.timeout,
headers=self.headers,
retries=self.retries,
- exception_cb=imds._readurl_exception_callback,
+ exception_cb=mock.ANY,
infinite=False,
log_req_resp=True,
),
@@ -304,17 +444,18 @@ class TestFetchReprovisionData:
def test_basic(
self,
caplog,
- mock_readurl,
+ mock_requests_session_request,
+ wrapped_readurl,
):
content = b"ovf content"
- mock_readurl.side_effect = [
- mock.Mock(contents=content),
+ mock_requests_session_request.side_effect = [
+ mock.Mock(content=content),
]
ovf = imds.fetch_reprovision_data()
assert ovf == content
- assert mock_readurl.mock_calls == [
+ assert wrapped_readurl.mock_calls == [
mock.call(
self.url,
timeout=self.timeout,
@@ -327,10 +468,15 @@ class TestFetchReprovisionData:
assert caplog.record_tuples == [
(
- "cloudinit.sources.azure.imds",
+ "cloudinit.url_helper",
+ logging.DEBUG,
+ StringMatch(r"Read from.*"),
+ ),
+ (
+ LOG_PATH,
logging.DEBUG,
"Polled IMDS 1 time(s)",
- )
+ ),
]
@pytest.mark.parametrize(
@@ -370,10 +516,10 @@ class TestFetchReprovisionData:
)
backoff_logs = [
(
- "cloudinit.sources.azure.imds",
+ LOG_PATH,
logging.INFO,
- "Polling IMDS failed with exception: "
- f"{wrapped_error!r} count: {i}",
+ f"Polling IMDS failed attempt {i} with exception: "
+ f"{wrapped_error!r}",
)
for i in range(1, failures + 1)
if i == 1 or math.log2(i).is_integer()
@@ -382,10 +528,10 @@ class TestFetchReprovisionData:
(
"cloudinit.url_helper",
logging.DEBUG,
- mock.ANY,
+ StringMatch(r"Read from.*"),
),
(
- "cloudinit.sources.azure.imds",
+ LOG_PATH,
logging.DEBUG,
f"Polled IMDS {failures+1} time(s)",
),
@@ -437,20 +583,20 @@ class TestFetchReprovisionData:
backoff_logs = [
(
- "cloudinit.sources.azure.imds",
+ LOG_PATH,
logging.INFO,
- "Polling IMDS failed with exception: "
- f"{wrapped_error!r} count: {i}",
+ f"Polling IMDS failed attempt {i} with exception: "
+ f"{wrapped_error!r}",
)
for i in range(1, failures + 1)
if i == 1 or math.log2(i).is_integer()
]
assert caplog.record_tuples == backoff_logs + [
(
- "cloudinit.sources.azure.imds",
+ LOG_PATH,
logging.INFO,
- "Polling IMDS failed with exception: "
- f"{exc_info.value!r} count: {failures+1}",
+ f"Polling IMDS failed attempt {failures+1} with exception: "
+ f"{exc_info.value!r}",
),
]
@@ -483,9 +629,9 @@ class TestFetchReprovisionData:
assert caplog.record_tuples == [
(
- "cloudinit.sources.azure.imds",
+ LOG_PATH,
logging.INFO,
- "Polling IMDS failed with exception: "
- f"{exc_info.value!r} count: 1",
+ "Polling IMDS failed attempt 1 with exception: "
+ f"{exc_info.value!r}",
),
]
diff --git a/tests/unittests/sources/conftest.py b/tests/unittests/sources/conftest.py
new file mode 100644
index 00000000..ef4d7e61
--- /dev/null
+++ b/tests/unittests/sources/conftest.py
@@ -0,0 +1,9 @@
+from unittest import mock
+
+import pytest
+
+
+@pytest.fixture(autouse=True)
+def mock_util_get_cmdline():
+ with mock.patch("cloudinit.util.get_cmdline", return_value="") as m:
+ yield m
diff --git a/tests/unittests/sources/test___init__.py b/tests/unittests/sources/test___init__.py
new file mode 100644
index 00000000..b84976da
--- /dev/null
+++ b/tests/unittests/sources/test___init__.py
@@ -0,0 +1,40 @@
+import pytest
+
+from cloudinit import sources
+from cloudinit.sources import DataSourceOpenStack as ds
+from tests.unittests.helpers import mock
+
+
+@pytest.mark.parametrize(
+ "m_cmdline",
+ (
+ # test ci.ds=
+ "aosiejfoij ci.ds=OpenStack ",
+ "ci.ds=OpenStack",
+ "aosiejfoij ci.ds=OpenStack blah",
+ "aosiejfoij ci.ds=OpenStack faljskebflk",
+ # test ci.datasource=
+ "aosiejfoij ci.datasource=OpenStack ",
+ "ci.datasource=OpenStack",
+ "aosiejfoij ci.datasource=OpenStack blah",
+ "aosiejfoij ci.datasource=OpenStack faljskebflk",
+ # weird whitespace
+ "ci.datasource=OpenStack\n",
+ "ci.datasource=OpenStack\t",
+ "ci.datasource=OpenStack\r",
+ "ci.datasource=OpenStack\v",
+ "ci.ds=OpenStack\n",
+ "ci.ds=OpenStack\t",
+ "ci.ds=OpenStack\r",
+ "ci.ds=OpenStack\v",
+ ),
+)
+def test_ds_detect_kernel_commandline(m_cmdline):
+ """check commandline match"""
+ with mock.patch(
+ "cloudinit.util.get_cmdline",
+ return_value=m_cmdline,
+ ):
+ assert (
+ ds.DataSourceOpenStack.dsname == sources.parse_cmdline()
+ ), f"could not parse [{m_cmdline}]"
diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py
index b5fe2672..9815c913 100644
--- a/tests/unittests/sources/test_azure.py
+++ b/tests/unittests/sources/test_azure.py
@@ -16,7 +16,6 @@ from cloudinit.net import dhcp
from cloudinit.sources import UNSET
from cloudinit.sources import DataSourceAzure as dsaz
from cloudinit.sources import InvalidMetaDataException
-from cloudinit.sources.azure import imds
from cloudinit.sources.helpers import netlink
from cloudinit.util import (
MountFailedError,
@@ -299,6 +298,15 @@ def patched_reported_ready_marker_path(azure_ds, patched_markers_dir_path):
yield reported_ready_marker
+def fake_http_error_for_code(status_code: int):
+ response_failure = requests.Response()
+ response_failure.status_code = status_code
+ return requests.exceptions.HTTPError(
+ "fake error",
+ response=response_failure,
+ )
+
+
def construct_ovf_env(
*,
custom_data=None,
@@ -1088,8 +1096,8 @@ scbus-1 on xpt0 bus 0
dev = ds.get_resource_disk_on_freebsd(1)
self.assertEqual("da1", dev)
- def test_not_is_platform_viable_seed_should_return_no_datasource(self):
- """Check seed_dir using _is_platform_viable and return False."""
+ def test_not_ds_detect_seed_should_return_no_datasource(self):
+ """Check seed_dir using ds_detect and return False."""
# Return a non-matching asset tag value
data = {}
dsrc = self._get_ds(data)
@@ -2887,36 +2895,38 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
"unknown-245": "624c3620",
}
- # Simulate two NICs by adding the same one twice.
- md = {
- "interface": [
- IMDS_NETWORK_METADATA["interface"][0],
- IMDS_NETWORK_METADATA["interface"][0],
- ]
- }
-
- m_req = mock.Mock(content=json.dumps(md))
- m_request.side_effect = [
- requests.Timeout("Fake connection timeout"),
- requests.ConnectionError("Fake Network Unreachable"),
- m_req,
- ]
+ m_req = mock.Mock(content=json.dumps({"not": "empty"}))
+ m_request.side_effect = (
+ [requests.Timeout("Fake connection timeout")] * 5
+ + [requests.ConnectionError("Fake Network Unreachable")] * 5
+ + 290 * [fake_http_error_for_code(410)]
+ + [m_req]
+ )
m_dhcpv4.return_value.lease = lease
- is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth0")
+ is_primary = dsa._check_if_nic_is_primary("eth0")
self.assertEqual(True, is_primary)
- self.assertEqual(2, expected_nic_count)
- assert len(m_request.mock_calls) == 3
+ assert len(m_request.mock_calls) == 301
- # Re-run tests to verify max retries.
+ # Re-run tests to verify max http failures.
+ m_request.reset_mock()
+ m_request.side_effect = 305 * [fake_http_error_for_code(410)]
+
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+
+ is_primary = dsa._check_if_nic_is_primary("eth1")
+ self.assertEqual(False, is_primary)
+ assert len(m_request.mock_calls) == 301
+
+ # Re-run tests to verify max connection error retries.
m_request.reset_mock()
m_request.side_effect = [
requests.Timeout("Fake connection timeout")
- ] * 6 + [requests.ConnectionError("Fake Network Unreachable")] * 6
+ ] * 9 + [requests.ConnectionError("Fake Network Unreachable")] * 9
dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
- is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth1")
+ is_primary = dsa._check_if_nic_is_primary("eth1")
self.assertEqual(False, is_primary)
assert len(m_request.mock_calls) == 11
@@ -3012,8 +3022,10 @@ class TestPreprovisioningPollIMDS(CiTestCase):
m_media_switch.return_value = None
dhcp_ctx = mock.MagicMock(lease=lease)
dhcp_ctx.obtain_lease.return_value = lease
+ dhcp_ctx.iface = lease["interface"]
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ dsa._ephemeral_dhcp_ctx = dhcp_ctx
with mock.patch.object(
dsa, "_reported_ready_marker_file", report_file
):
@@ -3021,7 +3033,7 @@ class TestPreprovisioningPollIMDS(CiTestCase):
assert m_report_ready.mock_calls == [mock.call()]
- self.assertEqual(3, m_dhcp.call_count, "Expected 3 DHCP calls")
+ self.assertEqual(2, m_dhcp.call_count, "Expected 2 DHCP calls")
assert m_fetch_reprovisiondata.call_count == 2
@mock.patch("os.path.isfile")
@@ -3152,6 +3164,7 @@ class TestPreprovisioningPollIMDS(CiTestCase):
distro.get_tmp_exec_path = self.tmp_dir
dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
self.assertFalse(os.path.exists(report_file))
+ dsa._ephemeral_dhcp_ctx = mock.Mock(interface="eth9")
with mock.patch.object(
dsa, "_reported_ready_marker_file", report_file
):
@@ -3186,6 +3199,7 @@ class TestPreprovisioningPollIMDS(CiTestCase):
distro.get_tmp_exec_path = self.tmp_dir
dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
self.assertFalse(os.path.exists(report_file))
+ dsa._ephemeral_dhcp_ctx = mock.Mock(interface="eth9")
with mock.patch.object(
dsa, "_reported_ready_marker_file", report_file
):
@@ -3227,8 +3241,9 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
}
]
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ dsa._ephemeral_dhcp_ctx = mock.Mock(interface="eth9")
self.assertTrue(len(dsa._poll_imds()) > 0)
- self.assertEqual(m_dhcp.call_count, 2)
+ self.assertEqual(m_dhcp.call_count, 1)
m_net.assert_any_call(
broadcast="192.168.2.255",
interface="eth9",
@@ -3237,7 +3252,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
router="192.168.2.1",
static_routes=None,
)
- self.assertEqual(m_net.call_count, 2)
+ self.assertEqual(m_net.call_count, 1)
def test__reprovision_calls__poll_imds(
self, m_fetch_reprovisiondata, m_dhcp, m_net, m_media_switch
@@ -3258,10 +3273,11 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
content = construct_ovf_env(username=username, hostname=hostname)
m_fetch_reprovisiondata.side_effect = [content]
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ dsa._ephemeral_dhcp_ctx = mock.Mock(interface="eth9")
md, _ud, cfg, _d = dsa._reprovision()
self.assertEqual(md["local-hostname"], hostname)
self.assertEqual(cfg["system_info"]["default_user"]["name"], username)
- self.assertEqual(m_dhcp.call_count, 2)
+ self.assertEqual(m_dhcp.call_count, 1)
m_net.assert_any_call(
broadcast="192.168.2.255",
interface="eth9",
@@ -3270,7 +3286,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
router="192.168.2.1",
static_routes=None,
)
- self.assertEqual(m_net.call_count, 2)
+ self.assertEqual(m_net.call_count, 1)
class TestRemoveUbuntuNetworkConfigScripts(CiTestCase):
@@ -3340,7 +3356,7 @@ class TestIsPlatformViable:
):
mock_chassis_asset_tag.return_value = tag
- assert dsaz.is_platform_viable(None) is True
+ assert dsaz.DataSourceAzure.ds_detect(None) is True
def test_true_on_azure_ovf_env_in_seed_dir(
self, azure_ds, mock_chassis_asset_tag, tmpdir
@@ -3351,7 +3367,7 @@ class TestIsPlatformViable:
seed_path.parent.mkdir(exist_ok=True, parents=True)
seed_path.write_text("")
- assert dsaz.is_platform_viable(seed_path.parent) is True
+ assert dsaz.DataSourceAzure.ds_detect(seed_path.parent) is True
def test_false_on_no_matching_azure_criteria(
self, azure_ds, mock_chassis_asset_tag
@@ -3360,8 +3376,13 @@ class TestIsPlatformViable:
seed_path = Path(azure_ds.seed_dir, "ovf-env.xml")
seed_path.parent.mkdir(exist_ok=True, parents=True)
+ paths = helpers.Paths(
+ {"cloud_dir": "/tmp/", "run_dir": "/tmp/", "seed_dir": seed_path}
+ )
- assert dsaz.is_platform_viable(seed_path) is False
+ assert (
+ dsaz.DataSourceAzure({}, mock.Mock(), paths).ds_detect() is False
+ )
class TestRandomSeed(CiTestCase):
@@ -3406,7 +3427,6 @@ class TestEphemeralNetworking:
mock.call(
iface=iface,
dhcp_log_func=dsaz.dhcp_log_cb,
- tmp_dir=azure_ds.distro.get_tmp_exec_path(),
),
mock.call().obtain_lease(),
]
@@ -3433,7 +3453,6 @@ class TestEphemeralNetworking:
mock.call(
iface=iface,
dhcp_log_func=dsaz.dhcp_log_cb,
- tmp_dir=azure_ds.distro.get_tmp_exec_path(),
),
mock.call().obtain_lease(),
]
@@ -3476,7 +3495,6 @@ class TestEphemeralNetworking:
mock.call(
iface=None,
dhcp_log_func=dsaz.dhcp_log_cb,
- tmp_dir=azure_ds.distro.get_tmp_exec_path(),
),
mock.call().obtain_lease(),
mock.call().obtain_lease(),
@@ -3511,7 +3529,6 @@ class TestEphemeralNetworking:
mock.call(
iface=None,
dhcp_log_func=dsaz.dhcp_log_cb,
- tmp_dir=azure_ds.distro.get_tmp_exec_path(),
),
mock.call().obtain_lease(),
mock.call().obtain_lease(),
@@ -3550,7 +3567,6 @@ class TestEphemeralNetworking:
mock.call(
iface=None,
dhcp_log_func=dsaz.dhcp_log_cb,
- tmp_dir=azure_ds.distro.get_tmp_exec_path(),
),
]
+ [mock.call().obtain_lease()] * 11
@@ -3596,15 +3612,6 @@ class TestEphemeralNetworking:
assert azure_ds._ephemeral_dhcp_ctx is None
-def fake_http_error_for_code(status_code: int):
- response_failure = requests.Response()
- response_failure.status_code = status_code
- return requests.exceptions.HTTPError(
- "fake error",
- response=response_failure,
- )
-
-
class TestInstanceId:
def test_metadata(self, azure_ds, mock_dmi_read_dmi_data):
azure_ds.metadata = {"instance-id": "test-id"}
@@ -3700,7 +3707,7 @@ class TestProvisioning:
]
self.mock_azure_get_metadata_from_fabric.return_value = []
- self.azure_ds._get_data()
+ self.azure_ds._check_and_get_data()
assert self.mock_readurl.mock_calls == [
mock.call(
@@ -3709,7 +3716,7 @@ class TestProvisioning:
timeout=2,
headers={"Metadata": "true"},
retries=10,
- exception_cb=imds._readurl_exception_callback,
+ exception_cb=mock.ANY,
infinite=False,
log_req_resp=True,
),
@@ -3723,7 +3730,6 @@ class TestProvisioning:
mock.call(
None,
dsaz.dhcp_log_cb,
- self.azure_ds.distro.get_tmp_exec_path(),
)
]
assert self.azure_ds._wireserver_endpoint == "10.11.12.13"
@@ -3763,13 +3769,13 @@ class TestProvisioning:
]
self.mock_azure_get_metadata_from_fabric.return_value = []
- self.azure_ds._get_data()
+ self.azure_ds._check_and_get_data()
assert self.mock_readurl.mock_calls == [
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- exception_cb=imds._readurl_exception_callback,
+ exception_cb=mock.ANY,
headers={"Metadata": "true"},
infinite=False,
log_req_resp=True,
@@ -3788,7 +3794,7 @@ class TestProvisioning:
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- exception_cb=imds._readurl_exception_callback,
+ exception_cb=mock.ANY,
headers={"Metadata": "true"},
infinite=False,
log_req_resp=True,
@@ -3806,12 +3812,10 @@ class TestProvisioning:
mock.call(
None,
dsaz.dhcp_log_cb,
- self.azure_ds.distro.get_tmp_exec_path(),
),
mock.call(
None,
dsaz.dhcp_log_cb,
- self.azure_ds.distro.get_tmp_exec_path(),
),
]
assert self.azure_ds._wireserver_endpoint == "10.11.12.13"
@@ -3860,21 +3864,19 @@ class TestProvisioning:
)
self.mock_readurl.side_effect = [
mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
- mock.MagicMock(
- contents=json.dumps(self.imds_md["network"]).encode()
- ),
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
mock.MagicMock(contents=construct_ovf_env().encode()),
mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
]
self.mock_azure_get_metadata_from_fabric.return_value = []
- self.azure_ds._get_data()
+ self.azure_ds._check_and_get_data()
assert self.mock_readurl.mock_calls == [
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- exception_cb=imds._readurl_exception_callback,
+ exception_cb=mock.ANY,
headers={"Metadata": "true"},
infinite=False,
log_req_resp=True,
@@ -3884,11 +3886,11 @@ class TestProvisioning:
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- exception_cb=imds._readurl_exception_callback,
+ exception_cb=mock.ANY,
headers={"Metadata": "true"},
infinite=False,
log_req_resp=True,
- retries=10,
+ retries=300,
timeout=2,
),
mock.call(
@@ -3903,7 +3905,7 @@ class TestProvisioning:
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- exception_cb=imds._readurl_exception_callback,
+ exception_cb=mock.ANY,
headers={"Metadata": "true"},
infinite=False,
log_req_resp=True,
@@ -3921,12 +3923,10 @@ class TestProvisioning:
mock.call(
None,
dsaz.dhcp_log_cb,
- self.azure_ds.distro.get_tmp_exec_path(),
),
mock.call(
"ethAttached1",
dsaz.dhcp_log_cb,
- self.azure_ds.distro.get_tmp_exec_path(),
),
]
assert self.azure_ds._wireserver_endpoint == "10.11.12.13"
@@ -4019,13 +4019,13 @@ class TestProvisioning:
None,
]
- self.azure_ds._get_data()
+ self.azure_ds._check_and_get_data()
assert self.mock_readurl.mock_calls == [
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- exception_cb=imds._readurl_exception_callback,
+ exception_cb=mock.ANY,
headers={"Metadata": "true"},
infinite=False,
log_req_resp=True,
@@ -4035,11 +4035,11 @@ class TestProvisioning:
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- exception_cb=imds._readurl_exception_callback,
+ exception_cb=mock.ANY,
headers={"Metadata": "true"},
infinite=False,
log_req_resp=True,
- retries=10,
+ retries=300,
timeout=2,
),
mock.call(
@@ -4054,7 +4054,7 @@ class TestProvisioning:
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- exception_cb=imds._readurl_exception_callback,
+ exception_cb=mock.ANY,
headers={"Metadata": "true"},
infinite=False,
log_req_resp=True,
@@ -4072,12 +4072,10 @@ class TestProvisioning:
mock.call(
None,
dsaz.dhcp_log_cb,
- self.azure_ds.distro.get_tmp_exec_path(),
),
mock.call(
"ethAttached1",
dsaz.dhcp_log_cb,
- self.azure_ds.distro.get_tmp_exec_path(),
),
]
assert self.azure_ds._wireserver_endpoint == "10.11.12.13"
@@ -4128,13 +4126,13 @@ class TestProvisioning:
]
self.mock_azure_get_metadata_from_fabric.return_value = []
- self.azure_ds._get_data()
+ self.azure_ds._check_and_get_data()
assert self.mock_readurl.mock_calls == [
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- exception_cb=imds._readurl_exception_callback,
+ exception_cb=mock.ANY,
headers={"Metadata": "true"},
infinite=False,
log_req_resp=True,
@@ -4153,7 +4151,7 @@ class TestProvisioning:
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- exception_cb=imds._readurl_exception_callback,
+ exception_cb=mock.ANY,
headers={"Metadata": "true"},
infinite=False,
log_req_resp=True,
@@ -4170,7 +4168,6 @@ class TestProvisioning:
mock.call(
None,
dsaz.dhcp_log_cb,
- self.azure_ds.distro.get_tmp_exec_path(),
),
]
@@ -4189,6 +4186,35 @@ class TestProvisioning:
# Verify no netlink operations for recovering PPS.
assert self.mock_netlink.mock_calls == []
+ @pytest.mark.parametrize("pps_type", ["Savable", "Running", "Unknown"])
+ def test_source_pps_fails_initial_dhcp(self, pps_type):
+ self.imds_md["extended"]["compute"]["ppsType"] = pps_type
+
+ nl_sock = mock.MagicMock()
+ self.mock_netlink.create_bound_netlink_socket.return_value = nl_sock
+ self.mock_readurl.side_effect = [
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ mock.MagicMock(contents=construct_ovf_env().encode()),
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ ]
+ self.mock_azure_get_metadata_from_fabric.return_value = []
+
+ self.mock_net_dhcp_maybe_perform_dhcp_discovery.side_effect = [
+ dhcp.NoDHCPLeaseError()
+ ]
+
+ with mock.patch.object(self.azure_ds, "_report_failure") as m_report:
+ self.azure_ds._get_data()
+
+ assert m_report.mock_calls == [mock.call()]
+
+ assert self.mock_wrapping_setup_ephemeral_networking.mock_calls == [
+ mock.call(timeout_minutes=20),
+ ]
+ assert self.mock_readurl.mock_calls == []
+ assert self.mock_azure_get_metadata_from_fabric.mock_calls == []
+ assert self.mock_netlink.mock_calls == []
+
@pytest.mark.parametrize(
"subp_side_effect",
[
@@ -4215,7 +4241,7 @@ class TestProvisioning:
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- exception_cb=imds._readurl_exception_callback,
+ exception_cb=mock.ANY,
headers={"Metadata": "true"},
infinite=False,
log_req_resp=True,
@@ -4235,7 +4261,6 @@ class TestProvisioning:
mock.call(
None,
dsaz.dhcp_log_cb,
- self.azure_ds.distro.get_tmp_exec_path(),
)
]
assert self.azure_ds._wireserver_endpoint == "10.11.12.13"
diff --git a/tests/unittests/sources/test_cloudsigma.py b/tests/unittests/sources/test_cloudsigma.py
index b92c3723..3b3279c8 100644
--- a/tests/unittests/sources/test_cloudsigma.py
+++ b/tests/unittests/sources/test_cloudsigma.py
@@ -10,7 +10,6 @@ from tests.unittests import helpers as test_helpers
SERVER_CONTEXT = {
"cpu": 1000,
"cpus_instead_of_cores": False,
- "global_context": {"some_global_key": "some_global_val"},
"mem": 1073741824,
"meta": {
"ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe",
@@ -44,7 +43,7 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase):
super(DataSourceCloudSigmaTest, self).setUp()
self.paths = helpers.Paths({"run_dir": self.tmp_dir()})
self.add_patch(
- DS_PATH + ".is_running_in_cloudsigma",
+ DS_PATH + ".override_ds_detect",
"m_is_container",
return_value=True,
)
@@ -99,6 +98,7 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase):
)
def test_encoded_user_data(self):
+
encoded_context = copy.deepcopy(SERVER_CONTEXT)
encoded_context["meta"]["base64_fields"] = "cloudinit-user-data"
encoded_context["meta"]["cloudinit-user-data"] = "aGkgd29ybGQK"
@@ -142,6 +142,3 @@ class DsLoads(test_helpers.TestCase):
["CloudSigma"], (sources.DEP_FILESYSTEM,), ["cloudinit.sources"]
)
self.assertEqual([DataSourceCloudSigma.DataSourceCloudSigma], found)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_ec2.py b/tests/unittests/sources/test_ec2.py
index 3fe525e3..2a311642 100644
--- a/tests/unittests/sources/test_ec2.py
+++ b/tests/unittests/sources/test_ec2.py
@@ -879,7 +879,7 @@ class TestEc2(test_helpers.ResponsesTestCase):
ret = ds.get_data()
self.assertTrue(ret)
- m_dhcp.assert_called_once_with("eth9", None, mock.ANY)
+ m_dhcp.assert_called_once_with("eth9", None)
m_net4.assert_called_once_with(
broadcast="192.168.2.255",
interface="eth9",
diff --git a/tests/unittests/sources/test_exoscale.py b/tests/unittests/sources/test_exoscale.py
index c71889f9..82b567d7 100644
--- a/tests/unittests/sources/test_exoscale.py
+++ b/tests/unittests/sources/test_exoscale.py
@@ -76,7 +76,7 @@ class TestDatasourceExoscale(ResponsesTestCase):
full test data."""
path = helpers.Paths({"run_dir": self.tmp})
ds = DataSourceExoscale({}, None, path)
- ds._is_platform_viable = lambda: True
+ ds.ds_detect = lambda: True
expected_password = "p@ssw0rd"
expected_id = "12345"
expected_hostname = "myname"
@@ -102,7 +102,7 @@ class TestDatasourceExoscale(ResponsesTestCase):
"{}instance-id".format(self.metadata_url),
body=expected_id,
)
- self.assertTrue(ds._get_data())
+ self.assertTrue(ds._check_and_get_data())
self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
self.assertEqual(
ds.metadata,
@@ -124,7 +124,7 @@ class TestDatasourceExoscale(ResponsesTestCase):
returned by the password server."""
path = helpers.Paths({"run_dir": self.tmp})
ds = DataSourceExoscale({}, None, path)
- ds._is_platform_viable = lambda: True
+ ds.ds_detect = lambda: True
expected_answer = "saved_password"
expected_id = "12345"
expected_hostname = "myname"
@@ -150,7 +150,7 @@ class TestDatasourceExoscale(ResponsesTestCase):
"{}instance-id".format(self.metadata_url),
body=expected_id,
)
- self.assertTrue(ds._get_data())
+ self.assertTrue(ds._check_and_get_data())
self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
self.assertEqual(
ds.metadata,
@@ -163,7 +163,7 @@ class TestDatasourceExoscale(ResponsesTestCase):
returned by the password server."""
path = helpers.Paths({"run_dir": self.tmp})
ds = DataSourceExoscale({}, None, path)
- ds._is_platform_viable = lambda: True
+ ds.ds_detect = lambda: True
expected_answer = ""
expected_id = "12345"
expected_hostname = "myname"
@@ -189,7 +189,7 @@ class TestDatasourceExoscale(ResponsesTestCase):
"{}instance-id".format(self.metadata_url),
body=expected_id,
)
- self.assertTrue(ds._get_data())
+ self.assertTrue(ds._check_and_get_data())
self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
self.assertEqual(
ds.metadata,
@@ -236,5 +236,5 @@ class TestDatasourceExoscale(ResponsesTestCase):
"""The datasource fails fast when the platform is not viable."""
path = helpers.Paths({"run_dir": self.tmp})
ds = DataSourceExoscale({}, None, path)
- ds._is_platform_viable = lambda: False
- self.assertFalse(ds._get_data())
+ ds.ds_detect = lambda: False
+ self.assertFalse(ds._check_and_get_data())
diff --git a/tests/unittests/sources/test_hetzner.py b/tests/unittests/sources/test_hetzner.py
index dfaa472e..6dbeb85b 100644
--- a/tests/unittests/sources/test_hetzner.py
+++ b/tests/unittests/sources/test_hetzner.py
@@ -114,7 +114,6 @@ class TestDataSourceHetzner(CiTestCase):
connectivity_url_data={
"url": "http://169.254.169.254/hetzner/v1/metadata/instance-id"
},
- tmp_dir=mock.ANY,
)
self.assertTrue(m_readmd.called)
diff --git a/tests/unittests/sources/test_init.py b/tests/unittests/sources/test_init.py
index 0447e02c..96e4dd90 100644
--- a/tests/unittests/sources/test_init.py
+++ b/tests/unittests/sources/test_init.py
@@ -894,15 +894,14 @@ class TestDataSource(CiTestCase):
self.datasource.default_update_events,
)
- def fake_get_data():
- raise Exception("get_data should not be called")
-
+ fake_get_data = mock.Mock()
self.datasource.get_data = fake_get_data
self.assertFalse(
self.datasource.update_metadata_if_supported(
source_event_types=[EventType.BOOT]
)
)
+ self.assertEqual([], fake_get_data.call_args_list)
@mock.patch.dict(
DataSource.supported_update_events,
diff --git a/tests/unittests/sources/test_nwcs.py b/tests/unittests/sources/test_nwcs.py
index 395f99f8..052e322a 100644
--- a/tests/unittests/sources/test_nwcs.py
+++ b/tests/unittests/sources/test_nwcs.py
@@ -47,16 +47,16 @@ class TestDataSourceNWCS(CiTestCase):
@mock.patch("cloudinit.sources.DataSourceNWCS.EphemeralDHCPv4")
@mock.patch("cloudinit.net.find_fallback_nic")
@mock.patch("cloudinit.sources.DataSourceNWCS.read_metadata")
- @mock.patch("cloudinit.sources.DataSourceNWCS.get_nwcs_data")
+ @mock.patch("cloudinit.sources.DataSourceNWCS.DataSourceNWCS.ds_detect")
def test_read_data(
self,
- m_get_nwcs_data,
+ m_ds_detect,
m_readmd,
m_fallback_nic,
m_net,
m_dhcp,
):
- m_get_nwcs_data.return_value = True
+ m_ds_detect.return_value = True
m_readmd.return_value = METADATA.copy()
m_fallback_nic.return_value = "eth0"
m_dhcp.return_value = [
@@ -92,13 +92,13 @@ class TestDataSourceNWCS(CiTestCase):
@mock.patch("cloudinit.sources.DataSourceNWCS.read_metadata")
@mock.patch("cloudinit.net.find_fallback_nic")
- @mock.patch("cloudinit.sources.DataSourceNWCS.get_nwcs_data")
+ @mock.patch("cloudinit.sources.DataSourceNWCS.DataSourceNWCS.ds_detect")
def test_not_on_nwcs_returns_false(
- self, m_get_nwcs_data, m_find_fallback, m_read_md
+ self, m_ds_detect, m_find_fallback, m_read_md
):
- """If helper 'get_nwcs_data' returns False,
+ """If 'ds_detect' returns False,
return False from get_data."""
- m_get_nwcs_data.return_value = False
+ m_ds_detect.return_value = False
ds = self.get_ds()
ret = ds.get_data()
diff --git a/tests/unittests/sources/test_opennebula.py b/tests/unittests/sources/test_opennebula.py
index 0fc332a9..43a5dd5f 100644
--- a/tests/unittests/sources/test_opennebula.py
+++ b/tests/unittests/sources/test_opennebula.py
@@ -562,26 +562,6 @@ class TestOpenNebulaNetwork(unittest.TestCase):
val = net.get_mask("eth0")
self.assertEqual("255.255.255.0", val)
- def test_get_network(self):
- """
- Verify get_network('device') correctly returns IPv4 network address.
- """
- context = {"ETH0_NETWORK": "1.2.3.0"}
- net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_network("eth0", MACADDR)
- self.assertEqual("1.2.3.0", val)
-
- def test_get_network_emptystring(self):
- """
- Verify get_network('device') correctly returns IPv4 network address.
- It returns network address created by MAC address if ETH0_NETWORK has
- empty string.
- """
- context = {"ETH0_NETWORK": ""}
- net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_network("eth0", MACADDR)
- self.assertEqual("10.18.1.0", val)
-
def test_get_field(self):
"""
Verify get_field('device', 'name') returns *context* value.
diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py
index 02516772..b37a7570 100644
--- a/tests/unittests/sources/test_openstack.py
+++ b/tests/unittests/sources/test_openstack.py
@@ -301,12 +301,12 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase):
responses_mock=self.responses,
)
distro = mock.MagicMock(spec=Distro)
- distro.is_virtual = False
ds_os = ds.DataSourceOpenStack(
settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp})
)
self.assertIsNone(ds_os.version)
- self.assertTrue(ds_os.get_data())
+ with mock.patch.object(ds_os, "override_ds_detect", return_value=True):
+ self.assertTrue(ds_os.get_data())
self.assertEqual(2, ds_os.version)
md = dict(ds_os.metadata)
md.pop("instance-id", None)
@@ -351,7 +351,7 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase):
self.assertIsNone(ds_os_local.version)
with test_helpers.mock.patch.object(
- ds_os_local, "detect_openstack"
+ ds_os_local, "override_ds_detect"
) as m_detect_os:
m_detect_os.return_value = True
found = ds_os_local.get_data()
@@ -367,7 +367,7 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase):
self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure)
self.assertEqual(VENDOR_DATA2, ds_os_local.vendordata2_pure)
self.assertIsNone(ds_os_local.vendordata_raw)
- m_dhcp.assert_called_with("eth9", None, mock.ANY)
+ m_dhcp.assert_called_with("eth9", None)
def test_bad_datasource_meta(self):
os_files = copy.deepcopy(OS_FILES)
@@ -384,7 +384,7 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase):
)
self.assertIsNone(ds_os.version)
with test_helpers.mock.patch.object(
- ds_os, "detect_openstack"
+ ds_os, "override_ds_detect"
) as m_detect_os:
m_detect_os.return_value = True
found = ds_os.get_data()
@@ -414,7 +414,8 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase):
"timeout": 0,
}
self.assertIsNone(ds_os.version)
- self.assertFalse(ds_os.get_data())
+ with mock.patch.object(ds_os, "override_ds_detect", return_value=True):
+ self.assertFalse(ds_os.get_data())
self.assertIsNone(ds_os.version)
def test_network_config_disabled_by_datasource_config(self):
@@ -490,7 +491,7 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase):
}
self.assertIsNone(ds_os.version)
with test_helpers.mock.patch.object(
- ds_os, "detect_openstack"
+ ds_os, "override_ds_detect"
) as m_detect_os:
m_detect_os.return_value = True
found = ds_os.get_data()
@@ -589,53 +590,17 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp})
)
- def test_detect_openstack_non_intel_x86(self, m_is_x86):
+ def test_ds_detect_non_intel_x86(self, m_is_x86):
"""Return True on non-intel platforms because dmi isn't conclusive."""
m_is_x86.return_value = False
self.assertTrue(
- self._fake_ds().detect_openstack(),
- "Expected detect_openstack == True",
+ self._fake_ds().ds_detect(),
+ "Expected ds_detect == True",
)
- def test_detect_openstack_bare_metal(self, m_is_x86):
- """Return True if the distro is non-virtual."""
- m_is_x86.return_value = True
-
- distro = mock.MagicMock(spec=Distro)
- distro.is_virtual = False
-
- fake_ds = self._fake_ds()
- fake_ds.distro = distro
-
- self.assertFalse(
- fake_ds.distro.is_virtual,
- "Expected distro.is_virtual == False",
- )
-
- with test_helpers.mock.patch.object(
- fake_ds, "wait_for_metadata_service"
- ) as m_wait_for_metadata_service:
- m_wait_for_metadata_service.return_value = True
-
- self.assertTrue(
- fake_ds.wait_for_metadata_service(),
- "Expected wait_for_metadata_service == True",
- )
-
- self.assertTrue(
- fake_ds.detect_openstack(), "Expected detect_openstack == True"
- )
-
- self.assertTrue(
- m_wait_for_metadata_service.called,
- "Expected wait_for_metadata_service to be called",
- )
-
@test_helpers.mock.patch(MOCK_PATH + "util.get_proc_env")
@test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
- def test_not_detect_openstack_intel_x86_ec2(
- self, m_dmi, m_proc_env, m_is_x86
- ):
+ def test_not_ds_detect_intel_x86_ec2(self, m_dmi, m_proc_env, m_is_x86):
"""Return False on EC2 platforms."""
m_is_x86.return_value = True
# No product_name in proc/1/environ
@@ -650,15 +615,13 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
m_dmi.side_effect = fake_dmi_read
self.assertFalse(
- self._fake_ds().detect_openstack(),
- "Expected detect_openstack == False on EC2",
+ self._fake_ds().ds_detect(),
+ "Expected ds_detect == False on EC2",
)
m_proc_env.assert_called_with(1)
@test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
- def test_detect_openstack_intel_product_name_compute(
- self, m_dmi, m_is_x86
- ):
+ def test_ds_detect_intel_product_name_compute(self, m_dmi, m_is_x86):
"""Return True on OpenStack compute and nova instances."""
m_is_x86.return_value = True
openstack_product_names = ["OpenStack Nova", "OpenStack Compute"]
@@ -666,12 +629,12 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
for product_name in openstack_product_names:
m_dmi.return_value = product_name
self.assertTrue(
- self._fake_ds().detect_openstack(),
- "Failed to detect_openstack",
+ self._fake_ds().ds_detect(),
+ "Failed to ds_detect",
)
@test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
- def test_detect_openstack_opentelekomcloud_chassis_asset_tag(
+ def test_ds_detect_opentelekomcloud_chassis_asset_tag(
self, m_dmi, m_is_x86
):
"""Return True on OpenStack reporting OpenTelekomCloud asset-tag."""
@@ -686,14 +649,12 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
m_dmi.side_effect = fake_dmi_read
self.assertTrue(
- self._fake_ds().detect_openstack(),
- "Expected detect_openstack == True on OpenTelekomCloud",
+ self._fake_ds().ds_detect(),
+ "Expected ds_detect == True on OpenTelekomCloud",
)
@test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
- def test_detect_openstack_sapccloud_chassis_asset_tag(
- self, m_dmi, m_is_x86
- ):
+ def test_ds_detect_sapccloud_chassis_asset_tag(self, m_dmi, m_is_x86):
"""Return True on OpenStack reporting SAP CCloud VM asset-tag."""
m_is_x86.return_value = True
@@ -706,14 +667,12 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
m_dmi.side_effect = fake_dmi_read
self.assertTrue(
- self._fake_ds().detect_openstack(),
- "Expected detect_openstack == True on SAP CCloud VM",
+ self._fake_ds().ds_detect(),
+ "Expected ds_detect == True on SAP CCloud VM",
)
@test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
- def test_detect_openstack_huaweicloud_chassis_asset_tag(
- self, m_dmi, m_is_x86
- ):
+ def test_ds_detect_huaweicloud_chassis_asset_tag(self, m_dmi, m_is_x86):
"""Return True on OpenStack reporting Huawei Cloud VM asset-tag."""
m_is_x86.return_value = True
@@ -726,14 +685,12 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
m_dmi.side_effect = fake_asset_tag_dmi_read
self.assertTrue(
- self._fake_ds().detect_openstack(),
- "Expected detect_openstack == True on Huawei Cloud VM",
+ self._fake_ds().ds_detect(),
+ "Expected ds_detect == True on Huawei Cloud VM",
)
@test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
- def test_detect_openstack_oraclecloud_chassis_asset_tag(
- self, m_dmi, m_is_x86
- ):
+ def test_ds_detect_oraclecloud_chassis_asset_tag(self, m_dmi, m_is_x86):
"""Return True on OpenStack reporting Oracle cloud asset-tag."""
m_is_x86.return_value = True
@@ -745,16 +702,19 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
assert False, "Unexpected dmi read of %s" % dmi_key
m_dmi.side_effect = fake_dmi_read
+ ds = self._fake_ds()
+ ds.sys_cfg = {"datasource_list": ["Oracle"]}
self.assertTrue(
- self._fake_ds().detect_openstack(accept_oracle=True),
- "Expected detect_openstack == True on OracleCloud.com",
+ ds.ds_detect(),
+ "Expected ds_detect == True on OracleCloud.com",
)
+ ds.sys_cfg = {"datasource_list": []}
self.assertFalse(
- self._fake_ds().detect_openstack(accept_oracle=False),
- "Expected detect_openstack == False.",
+ ds.ds_detect(),
+ "Expected ds_detect == False.",
)
- def _test_detect_openstack_nova_compute_chassis_asset_tag(
+ def _test_ds_detect_nova_compute_chassis_asset_tag(
self, m_dmi, m_is_x86, chassis_tag
):
"""Return True on OpenStack reporting generic asset-tag."""
@@ -769,27 +729,25 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
m_dmi.side_effect = fake_dmi_read
self.assertTrue(
- self._fake_ds().detect_openstack(),
- "Expected detect_openstack == True on Generic OpenStack Platform",
+ self._fake_ds().ds_detect(),
+ "Expected ds_detect == True on Generic OpenStack Platform",
)
@test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
- def test_detect_openstack_nova_chassis_asset_tag(self, m_dmi, m_is_x86):
- self._test_detect_openstack_nova_compute_chassis_asset_tag(
+ def test_ds_detect_nova_chassis_asset_tag(self, m_dmi, m_is_x86):
+ self._test_ds_detect_nova_compute_chassis_asset_tag(
m_dmi, m_is_x86, "OpenStack Nova"
)
@test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
- def test_detect_openstack_compute_chassis_asset_tag(self, m_dmi, m_is_x86):
- self._test_detect_openstack_nova_compute_chassis_asset_tag(
+ def test_ds_detect_compute_chassis_asset_tag(self, m_dmi, m_is_x86):
+ self._test_ds_detect_nova_compute_chassis_asset_tag(
m_dmi, m_is_x86, "OpenStack Compute"
)
@test_helpers.mock.patch(MOCK_PATH + "util.get_proc_env")
@test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
- def test_detect_openstack_by_proc_1_environ(
- self, m_dmi, m_proc_env, m_is_x86
- ):
+ def test_ds_detect_by_proc_1_environ(self, m_dmi, m_proc_env, m_is_x86):
"""Return True when nova product_name specified in /proc/1/environ."""
m_is_x86.return_value = True
# Nova product_name in proc/1/environ
@@ -807,8 +765,8 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
m_dmi.side_effect = fake_dmi_read
self.assertTrue(
- self._fake_ds().detect_openstack(),
- "Expected detect_openstack == True on OpenTelekomCloud",
+ self._fake_ds().ds_detect(),
+ "Expected ds_detect == True on OpenTelekomCloud",
)
m_proc_env.assert_called_with(1)
@@ -915,6 +873,3 @@ class TestMetadataReader(test_helpers.ResponsesTestCase):
reader._read_ec2_metadata = mock_read_ec2
self.assertEqual(expected, reader.read_v2())
self.assertEqual(1, mock_read_ec2.call_count)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_oracle.py b/tests/unittests/sources/test_oracle.py
index 22aba7e2..c67cacef 100644
--- a/tests/unittests/sources/test_oracle.py
+++ b/tests/unittests/sources/test_oracle.py
@@ -121,7 +121,7 @@ def oracle_ds(request, fixture_utils, paths, metadata_version, mocker):
This also performs the mocking required:
* ``_read_system_uuid`` returns something,
- * ``_is_platform_viable`` returns True,
+ * ``ds_detect`` returns True,
* ``DataSourceOracle._is_iscsi_root`` returns True by default or what
pytest.mark.is_iscsi gives as first param,
* ``DataSourceOracle._get_iscsi_config`` returns a network cfg if
@@ -144,7 +144,7 @@ def oracle_ds(request, fixture_utils, paths, metadata_version, mocker):
mocker.patch(DS_PATH + ".net.find_fallback_nic")
mocker.patch(DS_PATH + ".ephemeral.EphemeralDHCPv4")
mocker.patch(DS_PATH + "._read_system_uuid", return_value="someuuid")
- mocker.patch(DS_PATH + "._is_platform_viable", return_value=True)
+ mocker.patch(DS_PATH + ".DataSourceOracle.ds_detect", return_value=True)
mocker.patch(DS_PATH + ".read_opc_metadata", return_value=metadata)
mocker.patch(DS_PATH + ".KlibcOracleNetworkConfigSource")
ds = oracle.DataSourceOracle(
@@ -170,7 +170,7 @@ class TestDataSourceOracle:
assert "unknown" == oracle_ds.subplatform
def test_platform_info_after_fetch(self, oracle_ds):
- oracle_ds._get_data()
+ oracle_ds._check_and_get_data()
assert (
"metadata (http://169.254.169.254/opc/v2/)"
== oracle_ds.subplatform
@@ -178,7 +178,7 @@ class TestDataSourceOracle:
@pytest.mark.parametrize("metadata_version", [1])
def test_v1_platform_info_after_fetch(self, oracle_ds):
- oracle_ds._get_data()
+ oracle_ds._check_and_get_data()
assert (
"metadata (http://169.254.169.254/opc/v1/)"
== oracle_ds.subplatform
@@ -206,11 +206,11 @@ class TestIsPlatformViable:
("LetsGoCubs", False),
],
)
- def test_is_platform_viable(self, dmi_data, platform_viable):
+ def test_ds_detect(self, dmi_data, platform_viable):
with mock.patch(
DS_PATH + ".dmi.read_dmi_data", return_value=dmi_data
) as m_read_dmi_data:
- assert platform_viable == oracle._is_platform_viable()
+ assert platform_viable == oracle.DataSourceOracle.ds_detect()
m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")])
@@ -830,13 +830,13 @@ class TestCommon_GetDataBehaviour:
"""
@mock.patch(
- DS_PATH + "._is_platform_viable", mock.Mock(return_value=False)
+ DS_PATH + ".DataSourceOracle.ds_detect", mock.Mock(return_value=False)
)
def test_false_if_platform_not_viable(
self,
oracle_ds,
):
- assert not oracle_ds._get_data()
+ assert not oracle_ds._check_and_get_data()
@pytest.mark.parametrize(
"keyname,expected_value",
@@ -862,7 +862,7 @@ class TestCommon_GetDataBehaviour:
expected_value,
oracle_ds,
):
- assert oracle_ds._get_data()
+ assert oracle_ds._check_and_get_data()
assert expected_value == oracle_ds.metadata[keyname]
@pytest.mark.parametrize(
@@ -885,7 +885,7 @@ class TestCommon_GetDataBehaviour:
expected_value,
oracle_ds,
):
- assert oracle_ds._get_data()
+ assert oracle_ds._check_and_get_data()
assert expected_value == getattr(oracle_ds, attribute_name)
@pytest.mark.parametrize(
@@ -917,7 +917,7 @@ class TestCommon_GetDataBehaviour:
DS_PATH + ".read_opc_metadata",
mock.Mock(return_value=metadata),
):
- assert oracle_ds._get_data()
+ assert oracle_ds._check_and_get_data()
assert expected_value == oracle_ds.get_public_ssh_keys()
def test_missing_user_data_handled_gracefully(self, oracle_ds):
@@ -928,7 +928,7 @@ class TestCommon_GetDataBehaviour:
DS_PATH + ".read_opc_metadata",
mock.Mock(return_value=metadata),
):
- assert oracle_ds._get_data()
+ assert oracle_ds._check_and_get_data()
assert oracle_ds.userdata_raw is None
@@ -940,7 +940,7 @@ class TestCommon_GetDataBehaviour:
DS_PATH + ".read_opc_metadata",
mock.Mock(return_value=metadata),
):
- assert oracle_ds._get_data()
+ assert oracle_ds._check_and_get_data()
assert oracle_ds.userdata_raw is None
assert [] == oracle_ds.get_public_ssh_keys()
@@ -978,7 +978,7 @@ class TestNonIscsiRoot_GetDataBehaviour:
DS_PATH + ".read_opc_metadata",
mock.Mock(side_effect=assert_in_context_manager),
):
- assert oracle_ds._get_data()
+ assert oracle_ds._check_and_get_data()
assert [
mock.call(
@@ -987,7 +987,6 @@ class TestNonIscsiRoot_GetDataBehaviour:
"headers": {"Authorization": "Bearer Oracle"},
"url": "http://169.254.169.254/opc/v2/instance/",
},
- tmp_dir=oracle_ds.distro.get_tmp_exec_path(),
)
] == m_EphemeralDHCPv4.call_args_list
@@ -1021,7 +1020,7 @@ class TestNonIscsiRoot_GetDataBehaviour:
DS_PATH + ".read_opc_metadata",
mock.Mock(side_effect=assert_in_context_manager),
):
- assert oracle_ds._get_data()
+ assert oracle_ds._check_and_get_data()
assert [
mock.call(
@@ -1030,7 +1029,6 @@ class TestNonIscsiRoot_GetDataBehaviour:
"headers": {"Authorization": "Bearer Oracle"},
"url": "http://169.254.169.254/opc/v2/instance/",
},
- tmp_dir=oracle_ds.distro.get_tmp_exec_path(),
)
] == m_EphemeralDHCPv4.call_args_list
@@ -1134,6 +1132,15 @@ class TestNetworkConfig:
initramfs_idx = config_sources.index(NetworkConfigSource.INITRAMFS)
assert ds_idx < initramfs_idx
+ def test_system_network_cfg_preferred_over_ds(
+ self, m_get_interfaces_by_mac
+ ):
+ """Ensure that system net config is preferred over DS config"""
+ config_sources = oracle.DataSourceOracle.network_config_sources
+ ds_idx = config_sources.index(NetworkConfigSource.DS)
+ system_idx = config_sources.index(NetworkConfigSource.SYSTEM_CFG)
+ assert system_idx < ds_idx
+
@pytest.mark.parametrize("set_primary", [True, False])
def test__add_network_config_from_opc_imds_no_vnics_data(
self,
diff --git a/tests/unittests/sources/test_scaleway.py b/tests/unittests/sources/test_scaleway.py
index f9b470cb..d6a0874d 100644
--- a/tests/unittests/sources/test_scaleway.py
+++ b/tests/unittests/sources/test_scaleway.py
@@ -1,6 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import json
+import sys
from urllib.parse import SplitResult, urlsplit
import requests
@@ -90,7 +91,7 @@ class TestOnScaleway(CiTestCase):
@mock.patch("cloudinit.util.get_cmdline")
@mock.patch("os.path.exists")
@mock.patch("cloudinit.dmi.read_dmi_data")
- def test_not_on_scaleway(
+ def test_not_ds_detect(
self, m_read_dmi_data, m_file_exists, m_get_cmdline
):
self.install_mocks(
@@ -98,7 +99,7 @@ class TestOnScaleway(CiTestCase):
fake_file_exists=(m_file_exists, False),
fake_cmdline=(m_get_cmdline, False),
)
- self.assertFalse(DataSourceScaleway.on_scaleway())
+ self.assertFalse(DataSourceScaleway.DataSourceScaleway.ds_detect())
# When not on Scaleway, get_data() returns False.
datasource = DataSourceScaleway.DataSourceScaleway(
@@ -109,7 +110,7 @@ class TestOnScaleway(CiTestCase):
@mock.patch("cloudinit.util.get_cmdline")
@mock.patch("os.path.exists")
@mock.patch("cloudinit.dmi.read_dmi_data")
- def test_on_scaleway_dmi(
+ def test_ds_detect_dmi(
self, m_read_dmi_data, m_file_exists, m_get_cmdline
):
"""
@@ -121,12 +122,12 @@ class TestOnScaleway(CiTestCase):
fake_file_exists=(m_file_exists, False),
fake_cmdline=(m_get_cmdline, False),
)
- self.assertTrue(DataSourceScaleway.on_scaleway())
+ self.assertTrue(DataSourceScaleway.DataSourceScaleway.ds_detect())
@mock.patch("cloudinit.util.get_cmdline")
@mock.patch("os.path.exists")
@mock.patch("cloudinit.dmi.read_dmi_data")
- def test_on_scaleway_var_run_scaleway(
+ def test_ds_detect_var_run_scaleway(
self, m_read_dmi_data, m_file_exists, m_get_cmdline
):
"""
@@ -137,12 +138,12 @@ class TestOnScaleway(CiTestCase):
fake_file_exists=(m_file_exists, True),
fake_cmdline=(m_get_cmdline, False),
)
- self.assertTrue(DataSourceScaleway.on_scaleway())
+ self.assertTrue(DataSourceScaleway.DataSourceScaleway.ds_detect())
@mock.patch("cloudinit.util.get_cmdline")
@mock.patch("os.path.exists")
@mock.patch("cloudinit.dmi.read_dmi_data")
- def test_on_scaleway_cmdline(
+ def test_ds_detect_cmdline(
self, m_read_dmi_data, m_file_exists, m_get_cmdline
):
"""
@@ -153,7 +154,7 @@ class TestOnScaleway(CiTestCase):
fake_file_exists=(m_file_exists, False),
fake_cmdline=(m_get_cmdline, True),
)
- self.assertTrue(DataSourceScaleway.on_scaleway())
+ self.assertTrue(DataSourceScaleway.DataSourceScaleway.ds_detect())
def get_source_address_adapter(*args, **kwargs):
@@ -204,8 +205,9 @@ class TestDataSourceScaleway(ResponsesTestCase):
]
self.add_patch(
- "cloudinit.sources.DataSourceScaleway.on_scaleway",
- "_m_on_scaleway",
+ "cloudinit.sources.DataSourceScaleway."
+ "DataSourceScaleway.ds_detect",
+ "_m_ds_detect",
return_value=True,
)
self.add_patch(
@@ -225,6 +227,9 @@ class TestDataSourceScaleway(ResponsesTestCase):
"""
get_data() returns metadata, user data and vendor data.
"""
+ # fails on python 3.6
+ if sys.version_info.minor < 7:
+ return
m_get_cmdline.return_value = "scaleway"
# Make user data API return a valid response
@@ -355,6 +360,9 @@ class TestDataSourceScaleway(ResponsesTestCase):
"""
get_data() returns metadata, but no user data nor vendor data.
"""
+ # fails on python 3.6
+ if sys.version_info.minor < 7:
+ return
m_get_cmdline.return_value = "scaleway"
# Make user and vendor data APIs return HTTP/404, which means there is
@@ -386,6 +394,9 @@ class TestDataSourceScaleway(ResponsesTestCase):
get_data() is rate limited two times by the metadata API when fetching
user data.
"""
+ if sys.version_info.minor < 7:
+ return
+
m_get_cmdline.return_value = "scaleway"
self.responses.add_callback(
diff --git a/tests/unittests/sources/test_upcloud.py b/tests/unittests/sources/test_upcloud.py
index 9cbd33d9..694f4084 100644
--- a/tests/unittests/sources/test_upcloud.py
+++ b/tests/unittests/sources/test_upcloud.py
@@ -242,7 +242,7 @@ class TestUpCloudNetworkSetup(CiTestCase):
self.assertTrue(ret)
self.assertTrue(m_dhcp.called)
- m_dhcp.assert_called_with("eth1", None, mock.ANY)
+ m_dhcp.assert_called_with("eth1", None)
m_net.assert_called_once_with(
broadcast="10.6.3.255",
diff --git a/tests/unittests/sources/test_vmware.py b/tests/unittests/sources/test_vmware.py
index 4911e5bc..da5213d2 100644
--- a/tests/unittests/sources/test_vmware.py
+++ b/tests/unittests/sources/test_vmware.py
@@ -101,7 +101,11 @@ class TestDataSourceVMware(CiTestCase):
def test_no_data_access_method(self):
ds = get_ds(self.tmp)
ds.vmware_rpctool = None
- ret = ds.get_data()
+ with mock.patch(
+ "cloudinit.sources.DataSourceVMware.is_vmware_platform",
+ return_value=False,
+ ):
+ ret = ds.get_data()
self.assertFalse(ret)
@mock.patch("cloudinit.sources.DataSourceVMware.get_default_ip_addrs")
diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py
index 2fc2e21c..e09fdfe4 100644
--- a/tests/unittests/sources/vmware/test_vmware_config_file.py
+++ b/tests/unittests/sources/vmware/test_vmware_config_file.py
@@ -50,18 +50,8 @@ class TestVmwareConfigFile(CiTestCase):
self.assertEqual(2, len(cf), "insert size")
self.assertEqual("foo", cf["PASSWORD|-PASS"], "password")
self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword")
- self.assertFalse(
- cf.should_keep_current_value("PASSWORD|-PASS"), "keepPassword"
- )
- self.assertFalse(
- cf.should_remove_current_value("PASSWORD|-PASS"), "removePassword"
- )
self.assertFalse("FOO" in cf, "hasFoo")
- self.assertTrue(cf.should_keep_current_value("FOO"), "keepFoo")
- self.assertFalse(cf.should_remove_current_value("FOO"), "removeFoo")
self.assertTrue("BAR" in cf, "hasBar")
- self.assertFalse(cf.should_keep_current_value("BAR"), "keepBar")
- self.assertTrue(cf.should_remove_current_value("BAR"), "removeBar")
def test_configfile_without_instance_id(self):
"""
@@ -95,7 +85,6 @@ class TestVmwareConfigFile(CiTestCase):
self.assertEqual("myhost1", conf.host_name, "hostName")
self.assertEqual("Africa/Abidjan", conf.timezone, "tz")
- self.assertTrue(conf.utc, "utc")
self.assertEqual(
["10.20.145.1", "10.20.145.2"], conf.name_servers, "dns"
diff --git a/tests/unittests/test_apport.py b/tests/unittests/test_apport.py
index 1876c1be..c731a30a 100644
--- a/tests/unittests/test_apport.py
+++ b/tests/unittests/test_apport.py
@@ -1,3 +1,5 @@
+import os
+
import pytest
from tests.unittests.helpers import mock
@@ -5,24 +7,74 @@ from tests.unittests.helpers import mock
M_PATH = "cloudinit.apport."
+@pytest.fixture()
+def apport(request, mocker, paths):
+ """Mock apport.hookutils before importing cloudinit.apport.
+
+ This avoids our optional import dependency on apport, providing tests with
+ mocked apport.hookutils function call counts.
+ """
+ m_hookutils = mock.Mock()
+ mocker.patch.dict("sys.modules", {"apport.hookutils": m_hookutils})
+ mocker.patch(M_PATH + "read_cfg_paths", return_value=paths)
+ from cloudinit import apport
+
+ yield apport
+
+
class TestApport:
- def test_attach_user_data(self, mocker, tmpdir):
- m_hookutils = mock.Mock()
- mocker.patch.dict("sys.modules", {"apport.hookutils": m_hookutils})
- user_data_file = tmpdir.join("instance", "user-data.txt")
- mocker.patch(
- M_PATH + "_get_user_data_file", return_value=user_data_file
- )
+ @pytest.mark.parametrize(
+ "instance_data,choice_idx,expected_report",
+ (
+ pytest.param(
+ '{"v1": {"cloud_name": "mycloud"}}',
+ None,
+ {},
+ id="v1_cloud_name_exists",
+ ),
+ pytest.param(
+ '{"v1": {"cloud_id": "invalid"}}',
+ 1,
+ {"CloudName": "Azure"},
+ id="v1_no_cloud_name_present",
+ ),
+ pytest.param("{}", 0, {"CloudName": "AliYun"}, id="no_v1_key"),
+ pytest.param(
+ "{", 22, {"CloudName": "Oracle"}, id="not_valid_json"
+ ),
+ ),
+ )
+ def test_attach_cloud_info(
+ self, instance_data, choice_idx, expected_report, apport, paths
+ ):
+ """Prompt for cloud name when instance-data.json is not-json/absent."""
- from cloudinit import apport
+ instance_data_file = paths.get_runpath("instance_data")
+ if instance_data is None:
+ assert not os.path.exists(instance_data_file)
+ else:
+ with open(instance_data_file, "w") as stream:
+ stream.write(instance_data)
+ ui = mock.Mock()
+ ui.yesno.return_value = True
+ ui.choice.return_value = (choice_idx, "")
+ report = {}
+ apport.attach_cloud_info(report, ui)
+ if choice_idx is not None:
+ assert ui.choice.call_count == 1
+ assert report["CloudName"] == apport.KNOWN_CLOUD_NAMES[choice_idx]
+ else:
+ assert ui.choice.call_count == 0
+ def test_attach_user_data(self, apport, paths):
+ user_data_file = paths.get_ipath_cur("userdata_raw")
ui = mock.Mock()
ui.yesno.return_value = True
report = object()
apport.attach_user_data(report, ui)
assert [
mock.call(report, user_data_file, "user_data.txt"),
- ] == m_hookutils.attach_file.call_args_list
+ ] == apport.attach_file.call_args_list
assert [
mock.call(
report,
@@ -35,7 +87,7 @@ class TestApport:
"/etc/cloud/cloud.cfg.d/99-installer.cfg",
"InstallerCloudCfg",
),
- ] == m_hookutils.attach_file_if_exists.call_args_list
+ ] == apport.attach_file_if_exists.call_args_list
@pytest.mark.parametrize(
"report,tags",
@@ -52,9 +104,8 @@ class TestApport:
),
),
)
- def test_add_bug_tags_assigns_proper_tags(self, report, tags):
+ def test_add_bug_tags_assigns_proper_tags(self, report, tags, apport):
"""Tags are assigned based on non-empty project report key values."""
- from cloudinit import apport
apport.add_bug_tags(report)
assert report.get("Tags", "") == tags
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index 2d57ba04..e3fed410 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -55,14 +55,12 @@ class TestCLI:
data_d = tmpdir.join("data")
link_d = tmpdir.join("link")
FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"])
+ my_action = mock.Mock()
- def myaction():
- raise Exception("Should not call myaction")
-
- myargs = FakeArgs((action, myaction), False, "bogusmode")
+ myargs = FakeArgs((action, my_action), False, "bogusmode")
with pytest.raises(ValueError, match=match):
cli.status_wrapper(name, myargs, data_d, link_d)
- assert "Should not call myaction" not in caplog.text
+ assert [] == my_action.call_args_list
def test_status_wrapper_init_local_writes_fresh_status_info(self, tmpdir):
"""When running in init-local mode, status_wrapper writes status.json.
@@ -149,7 +147,6 @@ class TestCLI:
"analyze",
"clean",
"devel",
- "dhclient-hook",
"features",
"init",
"modules",
@@ -319,19 +316,6 @@ class TestCLI:
assert "cc_ntp" == parseargs.name
assert False is parseargs.report
- @mock.patch("cloudinit.cmd.main.dhclient_hook.handle_args")
- def test_dhclient_hook_subcommand(self, m_handle_args):
- """The subcommand 'dhclient-hook' calls dhclient_hook with args."""
- self._call_main(["cloud-init", "dhclient-hook", "up", "eth0"])
- (name, parseargs) = m_handle_args.call_args_list[0][0]
- assert "dhclient-hook" == name
- assert "dhclient-hook" == parseargs.subcommand
- assert "dhclient-hook" == parseargs.action[0]
- assert False is parseargs.debug
- assert False is parseargs.force
- assert "up" == parseargs.event
- assert "eth0" == parseargs.interface
-
@mock.patch("cloudinit.cmd.main.main_features")
def test_features_hook_subcommand(self, m_features):
"""The subcommand 'features' calls main_features with args."""
diff --git a/tests/unittests/test_dhclient_hook.py b/tests/unittests/test_dhclient_hook.py
deleted file mode 100644
index 7e5b54c0..00000000
--- a/tests/unittests/test_dhclient_hook.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Tests for cloudinit.dhclient_hook."""
-
-import argparse
-import json
-import os
-from unittest import mock
-
-from cloudinit import dhclient_hook as dhc
-from tests.unittests.helpers import CiTestCase, dir2dict, populate_dir
-
-
-class TestDhclientHook(CiTestCase):
-
- ex_env = {
- "interface": "eth0",
- "new_dhcp_lease_time": "3600",
- "new_host_name": "x1",
- "new_ip_address": "10.145.210.163",
- "new_subnet_mask": "255.255.255.0",
- "old_host_name": "x1",
- "PATH": "/usr/sbin:/usr/bin:/sbin:/bin",
- "pid": "614",
- "reason": "BOUND",
- }
-
- # some older versions of dhclient put the same content,
- # but in upper case with DHCP4_ instead of new_
- ex_env_dhcp4 = {
- "REASON": "BOUND",
- "DHCP4_dhcp_lease_time": "3600",
- "DHCP4_host_name": "x1",
- "DHCP4_ip_address": "10.145.210.163",
- "DHCP4_subnet_mask": "255.255.255.0",
- "INTERFACE": "eth0",
- "PATH": "/usr/sbin:/usr/bin:/sbin:/bin",
- "pid": "614",
- }
-
- expected = {
- "dhcp_lease_time": "3600",
- "host_name": "x1",
- "ip_address": "10.145.210.163",
- "subnet_mask": "255.255.255.0",
- }
-
- def setUp(self):
- super(TestDhclientHook, self).setUp()
- self.tmp = self.tmp_dir()
-
- def test_handle_args(self):
- """quick test of call to handle_args."""
- nic = "eth0"
- args = argparse.Namespace(event=dhc.UP, interface=nic)
- with mock.patch.dict("os.environ", clear=True, values=self.ex_env):
- dhc.handle_args(dhc.NAME, args, data_d=self.tmp)
- found = dir2dict(self.tmp + os.path.sep)
- self.assertEqual([nic + ".json"], list(found.keys()))
- self.assertEqual(self.expected, json.loads(found[nic + ".json"]))
-
- def test_run_hook_up_creates_dir(self):
- """If dir does not exist, run_hook should create it."""
- subd = self.tmp_path("subdir", self.tmp)
- nic = "eth1"
- dhc.run_hook(nic, "up", data_d=subd, env=self.ex_env)
- self.assertEqual(
- set([nic + ".json"]), set(dir2dict(subd + os.path.sep))
- )
-
- def test_run_hook_up(self):
- """Test expected use of run_hook_up."""
- nic = "eth0"
- dhc.run_hook(nic, "up", data_d=self.tmp, env=self.ex_env)
- found = dir2dict(self.tmp + os.path.sep)
- self.assertEqual([nic + ".json"], list(found.keys()))
- self.assertEqual(self.expected, json.loads(found[nic + ".json"]))
-
- def test_run_hook_up_dhcp4_prefix(self):
- """Test run_hook filters correctly with older DHCP4_ data."""
- nic = "eth0"
- dhc.run_hook(nic, "up", data_d=self.tmp, env=self.ex_env_dhcp4)
- found = dir2dict(self.tmp + os.path.sep)
- self.assertEqual([nic + ".json"], list(found.keys()))
- self.assertEqual(self.expected, json.loads(found[nic + ".json"]))
-
- def test_run_hook_down_deletes(self):
- """down should delete the created json file."""
- nic = "eth1"
- populate_dir(
- self.tmp, {nic + ".json": "{'abcd'}", "myfile.txt": "text"}
- )
- dhc.run_hook(nic, "down", data_d=self.tmp, env={"old_host_name": "x1"})
- self.assertEqual(
- set(["myfile.txt"]), set(dir2dict(self.tmp + os.path.sep))
- )
-
- def test_get_parser(self):
- """Smoke test creation of get_parser."""
- # cloud-init main uses 'action'.
- event, interface = (dhc.UP, "mynic0")
- self.assertEqual(
- argparse.Namespace(
- event=event,
- interface=interface,
- action=(dhc.NAME, dhc.handle_args),
- ),
- dhc.get_parser().parse_args([event, interface]),
- )
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 03be0c92..cc75209e 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -950,7 +950,7 @@ class TestOracle(DsIdentifyBase):
"""Simple negative test of Oracle."""
mycfg = copy.deepcopy(VALID_CFG["Oracle"])
mycfg["files"][P_CHASSIS_ASSET_TAG] = "Not Oracle"
- self._check_via_dict(mycfg, ds=["openstack", "none"], rc=RC_FOUND)
+ self._check_via_dict(mycfg, rc=RC_NOT_FOUND)
def blkid_out(disks=None):
@@ -1056,7 +1056,6 @@ VALID_CFG = {
"Ec2-brightbox-negative": {
"ds": "Ec2",
"files": {P_PRODUCT_SERIAL: "tricky-host.bobrightbox.com\n"},
- "mocks": [MOCK_VIRT_IS_KVM],
},
"GCE": {
"ds": "GCE",
@@ -1598,7 +1597,6 @@ VALID_CFG = {
"Ec2-E24Cloud-negative": {
"ds": "Ec2",
"files": {P_SYS_VENDOR: "e24cloudyday\n"},
- "mocks": [MOCK_VIRT_IS_KVM],
},
"VMware-NoValidTransports": {
"ds": "VMware",
@@ -1757,7 +1755,6 @@ VALID_CFG = {
"VMware-GuestInfo-NoVirtID": {
"ds": "VMware",
"mocks": [
- MOCK_VIRT_IS_KVM,
{
"name": "vmware_has_rpctool",
"ret": 0,
@@ -1863,7 +1860,6 @@ VALID_CFG = {
P_PRODUCT_NAME: "3DS Outscale VM\n",
P_SYS_VENDOR: "Not 3DS Outscale\n",
},
- "mocks": [MOCK_VIRT_IS_KVM],
},
"Ec2-Outscale-negative-productname": {
"ds": "Ec2",
@@ -1871,7 +1867,6 @@ VALID_CFG = {
P_PRODUCT_NAME: "Not 3DS Outscale VM\n",
P_SYS_VENDOR: "3DS Outscale\n",
},
- "mocks": [MOCK_VIRT_IS_KVM],
},
}
diff --git a/tests/unittests/test_features.py b/tests/unittests/test_features.py
index 94c7ae13..8aace78d 100644
--- a/tests/unittests/test_features.py
+++ b/tests/unittests/test_features.py
@@ -27,7 +27,7 @@ def create_override(request):
"""
override_path = Path(cloudinit.__file__).parent / "feature_overrides.py"
if override_path.exists():
- raise Exception(
+ raise RuntimeError(
"feature_overrides.py unexpectedly exists! "
"Remove it to run this test."
)
diff --git a/tests/unittests/test_log.py b/tests/unittests/test_log.py
index 87c69dbb..38791ef9 100644
--- a/tests/unittests/test_log.py
+++ b/tests/unittests/test_log.py
@@ -8,6 +8,7 @@ import logging
import time
from cloudinit import log as ci_logging
+from cloudinit import util
from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT
from tests.unittests.helpers import CiTestCase
@@ -57,3 +58,32 @@ class TestCloudInitLogger(CiTestCase):
self.assertLess(parsed_dt, utc_after)
self.assertLess(utc_before, utc_after)
self.assertGreater(utc_after, parsed_dt)
+
+
+class TestDeprecatedLogs:
+ def test_deprecated_log_level(self, caplog):
+ ci_logging.setupLogging()
+ log = ci_logging.getLogger()
+ log.deprecated("deprecated message")
+ assert "DEPRECATED" == caplog.records[0].levelname
+ assert "deprecated message" in caplog.text
+
+ def test_log_deduplication(self, caplog):
+ ci_logging.defineDeprecationLogger()
+ util.deprecate(
+ deprecated="stuff",
+ deprecated_version="19.1",
+ extra_message=":)",
+ )
+ util.deprecate(
+ deprecated="stuff",
+ deprecated_version="19.1",
+ extra_message=":)",
+ )
+ util.deprecate(
+ deprecated="stuff",
+ deprecated_version="19.1",
+ extra_message=":)",
+ schedule=6,
+ )
+ assert 2 == len(caplog.records)
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 056aaeb6..d7640d70 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -13,7 +13,7 @@ from typing import Optional
import pytest
from yaml.serializer import Serializer
-from cloudinit import distros, net
+from cloudinit import distros, log, net
from cloudinit import safeyaml as yaml
from cloudinit import subp, temp_utils, util
from cloudinit.net import (
@@ -5230,6 +5230,7 @@ USERCTL=no
""" # noqa: E501
),
}
+ log.setupLogging()
found = self._render_and_read(network_config=v2_data)
self._compare_files_to_expected(expected, found)
@@ -8222,23 +8223,39 @@ class TestGetInterfacesByMac(CiTestCase):
}
self.assertEqual(expected, result)
- def test_duplicate_ignored_macs(self):
- # LP: #199792
- self._data = copy.deepcopy(self._data)
- self._data["macs"]["swp0"] = "9a:57:7d:78:47:c0"
- self._data["macs"]["swp1"] = "9a:57:7d:78:47:c0"
- self._data["own_macs"].append("swp0")
- self._data["own_macs"].append("swp1")
- self._data["drivers"]["swp0"] = "mscc_felix"
- self._data["drivers"]["swp1"] = "mscc_felix"
- self._mock_setup()
+
+@pytest.mark.parametrize("driver", ("mscc_felix", "fsl_enetc", "qmi_wwan"))
+@mock.patch("cloudinit.net.get_sys_class_path")
+@mock.patch("cloudinit.util.system_info", return_value={"variant": "ubuntu"})
+class TestDuplicateMac:
+ def test_duplicate_ignored_macs(
+ self, _get_system_info, get_sys_class_path, driver, tmpdir, caplog
+ ):
+ # Create sysfs representation of network devices and drivers in tmpdir
+ sys_net_path = tmpdir.join("class/net")
+ get_sys_class_path.return_value = sys_net_path.strpath + "/"
+ net_data = {
+ "swp0/address": "9a:57:7d:78:47:c0",
+ "swp0/addr_assign_type": "0",
+ "swp0/device/dev_id": "something",
+ "swp1/address": "9a:57:7d:78:47:c0",
+ "swp1/addr_assign_type": "0",
+ "swp1/device/dev_id": "something else",
+ }
+ populate_dir(sys_net_path.strpath, net_data)
+ # Symlink for device driver
+ driver_path = tmpdir.join(f"module/{driver}")
+ driver_path.ensure_dir()
+ sys_net_path.join("swp0/device/driver").mksymlinkto(driver_path)
+ sys_net_path.join("swp1/device/driver").mksymlinkto(driver_path)
+
with does_not_raise():
net.get_interfaces_by_mac()
pattern = (
"Ignoring duplicate macs from 'swp[0-1]' and 'swp[0-1]' due to "
- "driver 'mscc_felix'."
+ f"driver '{driver}'."
)
- assert re.search(pattern, self.logs.getvalue())
+ assert re.search(pattern, caplog.text)
class TestInterfacesSorting(CiTestCase):
diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py
index afd9056a..2a363ec4 100644
--- a/tests/unittests/test_net_activators.py
+++ b/tests/unittests/test_net_activators.py
@@ -322,28 +322,3 @@ class TestActivatorsBringDown:
activator.bring_down_interface("eth0")
assert len(m_subp.call_args_list) == 1
assert m_subp.call_args_list[0] == expected_call_list[0]
-
- @patch("cloudinit.subp.subp", return_value=("", ""))
- def test_bring_down_interfaces(
- self, m_subp, activator, expected_call_list, available_mocks
- ):
- activator.bring_down_interfaces(["eth0", "eth1"])
- assert expected_call_list == m_subp.call_args_list
-
- @patch("cloudinit.subp.subp", return_value=("", ""))
- def test_bring_down_all_interfaces_v1(
- self, m_subp, activator, expected_call_list, available_mocks
- ):
- network_state = parse_net_config_data(load(V1_CONFIG))
- activator.bring_down_all_interfaces(network_state)
- for call in m_subp.call_args_list:
- assert call in expected_call_list
-
- @patch("cloudinit.subp.subp", return_value=("", ""))
- def test_bring_down_all_interfaces_v2(
- self, m_subp, activator, expected_call_list, available_mocks
- ):
- network_state = parse_net_config_data(load(V2_CONFIG))
- activator.bring_down_all_interfaces(network_state)
- for call in m_subp.call_args_list:
- assert call in expected_call_list
diff --git a/tests/unittests/test_netinfo.py b/tests/unittests/test_netinfo.py
index aecce921..7612a28b 100644
--- a/tests/unittests/test_netinfo.py
+++ b/tests/unittests/test_netinfo.py
@@ -198,7 +198,7 @@ class TestNetInfo:
return (SAMPLE_ROUTE_OUT_V4, "")
if args[0] == ["netstat", "-A", "inet6", "--route", "--numeric"]:
return (SAMPLE_ROUTE_OUT_V6, "")
- raise Exception("Unexpected subp call %s" % args[0])
+ raise RuntimeError("Unexpected subp call %s" % args[0])
m_subp.side_effect = subp_netstat_route_selector
m_which.side_effect = lambda x: x if x == "netstat" else None
@@ -216,7 +216,7 @@ class TestNetInfo:
v6cmd = ["ip", "--oneline", "-6", "route", "list", "table", "all"]
if v6cmd == args[0]:
return (SAMPLE_IPROUTE_OUT_V6, "")
- raise Exception("Unexpected subp call %s" % args[0])
+ raise RuntimeError("Unexpected subp call %s" % args[0])
m_subp.side_effect = subp_iproute_selector
m_which.side_effect = lambda x: x if x == "ip" else None
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 07142a86..865f202a 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -2877,7 +2877,7 @@ class TestFindDevs:
return msdos
elif pattern == "/dev/iso9660/*":
return iso9660
- raise Exception
+ raise RuntimeError
m_glob.side_effect = fake_glob
@@ -3026,3 +3026,18 @@ class TestVersion:
)
def test_from_str(self, str_ver, cls_ver):
assert util.Version.from_str(str_ver) == cls_ver
+
+
+@pytest.mark.allow_dns_lookup
+class TestResolvable:
+ @mock.patch.object(util, "_DNS_REDIRECT_IP", return_value=True)
+ @mock.patch.object(util.socket, "getaddrinfo")
+ def test_ips_need_not_be_resolved(self, m_getaddr, m_dns):
+ """Optimization test: dns resolution may timeout during early boot, and
+ often the urls being checked use IP addresses rather than dns names.
+ Therefore, the fast path checks if the address contains an IP and exits
+ early if the path is a valid IP.
+ """
+ assert util.is_resolvable("http://169.254.169.254/") is True
+ assert util.is_resolvable("http://[fd00:ec2::254]/") is True
+ assert not m_getaddr.called
diff --git a/tests/unittests/util.py b/tests/unittests/util.py
index da04c6b2..e7094ec5 100644
--- a/tests/unittests/util.py
+++ b/tests/unittests/util.py
@@ -1,5 +1,4 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from typing import Optional
from unittest import mock
from cloudinit import cloud, distros, helpers
@@ -146,10 +145,6 @@ class MockDistro(distros.Distro):
def package_command(self, command, args=None, pkgs=None):
pass
- @property
- def is_virtual(self) -> Optional[bool]:
- return True
-
def update_package_sources(self):
return (True, "yay")
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index eb656d7f..a6276974 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -18,6 +18,7 @@ berolinux
bipinbachhao
BirknerAlex
bmhughes
+brianphaley
CalvoM
candlerb
cawamata
@@ -43,6 +44,7 @@ einsibjarni
emmanuelthome
eslerm
esposem
+frantisekz
GabrielNagy
garzdin
giggsoff
@@ -135,6 +137,8 @@ vorlonofportland
vteratipally
Vultaire
WebSpider
+Wind-net
+wmousa
wschoot
wynnfeng
xiachen-rh
diff --git a/tools/ds-identify b/tools/ds-identify
index da23e836..cd07565d 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -1262,13 +1262,6 @@ dscheck_OpenStack() {
*) return ${DS_MAYBE};;
esac
- # If we are on bare metal, then we maybe are on a
- # bare metal Ironic environment.
- detect_virt
- if [ "${_RET}" = "none" ]; then
- return ${DS_MAYBE}
- fi
-
return ${DS_NOT_FOUND}
}
diff --git a/tools/hook-dhclient b/tools/hook-dhclient
deleted file mode 100755
index 02122f37..00000000
--- a/tools/hook-dhclient
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/sh
-# This file is part of cloud-init. See LICENSE file for license information.
-
-# This script writes DHCP lease information into the cloud-init run directory
-# It is sourced, not executed. For more information see dhclient-script(8).
-
-is_azure() {
- local dmi_path="/sys/class/dmi/id/board_vendor" vendor=""
- if [ -e "$dmi_path" ] && read vendor < "$dmi_path"; then
- [ "$vendor" = "Microsoft Corporation" ] && return 0
- fi
- return 1
-}
-
-is_enabled() {
- # only execute hooks if cloud-init is enabled and on azure
- [ -e /run/cloud-init/enabled ] || return 1
- is_azure
-}
-
-if is_enabled; then
- case "$reason" in
- BOUND) cloud-init dhclient-hook up "$interface";;
- DOWN|RELEASE|REBOOT|STOP|EXPIRE)
- cloud-init dhclient-hook down "$interface";;
- esac
-fi
diff --git a/tools/hook-network-manager b/tools/hook-network-manager
deleted file mode 100755
index 67d9044a..00000000
--- a/tools/hook-network-manager
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/sh
-# This file is part of cloud-init. See LICENSE file for license information.
-
-# This script hooks into NetworkManager(8) via its scripts
-# arguments are 'interface-name' and 'action'
-#
-is_azure() {
- local dmi_path="/sys/class/dmi/id/board_vendor" vendor=""
- if [ -e "$dmi_path" ] && read vendor < "$dmi_path"; then
- [ "$vendor" = "Microsoft Corporation" ] && return 0
- fi
- return 1
-}
-
-is_enabled() {
- # only execute hooks if cloud-init is enabled and on azure
- [ -e /run/cloud-init/enabled ] || return 1
- is_azure
-}
-
-if is_enabled; then
- case "$1:$2" in
- *:up) exec cloud-init dhclient-hook up "$1";;
- *:down) exec cloud-init dhclient-hook down "$1";;
- esac
-fi
diff --git a/tools/hook-rhel.sh b/tools/hook-rhel.sh
deleted file mode 100755
index 513a5515..00000000
--- a/tools/hook-rhel.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/sh
-# This file is part of cloud-init. See LICENSE file for license information.
-
-# Current versions of RHEL and CentOS do not honor the directory
-# /etc/dhcp/dhclient-exit-hooks.d so this file can be placed in
-# /etc/dhcp/dhclient.d instead
-is_azure() {
- local dmi_path="/sys/class/dmi/id/board_vendor" vendor=""
- if [ -e "$dmi_path" ] && read vendor < "$dmi_path"; then
- [ "$vendor" = "Microsoft Corporation" ] && return 0
- fi
- return 1
-}
-
-is_enabled() {
- # only execute hooks if cloud-init is enabled and on azure
- [ -e /run/cloud-init/enabled ] || return 1
- is_azure
-}
-
-hook-rhel_config(){
- is_enabled || return 0
- cloud-init dhclient-hook up "$interface"
-}
-
-hook-rhel_restore(){
- is_enabled || return 0
- cloud-init dhclient-hook down "$interface"
-}