author     James Falcon <james.falcon@canonical.com>  2021-05-11 11:52:54 -0500
committer  git-ubuntu importer <ubuntu-devel-discuss@lists.ubuntu.com>  2021-05-11 20:20:10 +0000
commit     389a4fece2ec11cfd1708029ad94612ed0de23f5 (patch)
tree       fef6fefca2d8ead3bc2e98f3833e25f7187de18b
parent     e6e83ac6f641c9991f466a8ef2545e3612eeffd7 (diff)
download   cloud-init-git-389a4fece2ec11cfd1708029ad94612ed0de23f5.tar.gz

21.2-3-g899bfaa9-0ubuntu1 (patches unapplied)

Imported using git-ubuntu import.
-rw-r--r--  .travis.yml | 11
-rw-r--r--  ChangeLog | 63
-rw-r--r--  README.md | 2
-rw-r--r--  cloudinit/apport.py | 1
-rw-r--r--  cloudinit/config/cc_chef.py | 2
-rw-r--r--  cloudinit/config/cc_disk_setup.py | 13
-rw-r--r--  cloudinit/config/cc_ntp.py | 4
-rw-r--r--  cloudinit/config/cc_rightscale_userdata.py | 2
-rw-r--r--  cloudinit/config/cc_set_hostname.py | 14
-rw-r--r--  cloudinit/config/cc_update_hostname.py | 8
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py | 4
-rwxr-xr-x  cloudinit/distros/__init__.py | 9
-rw-r--r--  cloudinit/distros/almalinux.py | 9
-rw-r--r--  cloudinit/distros/freebsd.py | 7
-rw-r--r--  cloudinit/distros/rhel.py | 11
-rw-r--r--  cloudinit/distros/tests/test_init.py | 7
-rw-r--r--  cloudinit/helpers.py | 17
-rw-r--r--  cloudinit/net/__init__.py | 2
-rwxr-xr-x  cloudinit/net/cmdline.py | 6
-rw-r--r--  cloudinit/net/sysconfig.py | 22
-rw-r--r--  cloudinit/net/tests/test_dhcp.py | 5
-rw-r--r--  cloudinit/net/tests/test_init.py | 15
-rw-r--r--  cloudinit/settings.py | 1
-rwxr-xr-x  cloudinit/sources/DataSourceAzure.py | 357
-rw-r--r--  cloudinit/sources/DataSourceVultr.py | 147
-rw-r--r--  cloudinit/sources/__init__.py | 12
-rwxr-xr-x  cloudinit/sources/helpers/azure.py | 25
-rw-r--r--  cloudinit/sources/helpers/vultr.py | 242
-rw-r--r--  cloudinit/tests/test_upgrade.py | 7
-rw-r--r--  cloudinit/tests/test_util.py | 35
-rw-r--r--  cloudinit/util.py | 4
-rw-r--r--  cloudinit/version.py | 2
-rw-r--r--  config/cloud.cfg.tmpl | 6
-rw-r--r--  debian/changelog | 50
-rw-r--r--  debian/cloud-init.templates | 6
-rw-r--r--  debian/patches/cpick-83f6bbfb-Fix-unpickle-for-source-paths-missing-run_dir-863 | 576
-rw-r--r--  debian/patches/cpick-d132356c-fix-error-on-upgrade-caused-by-new-vendordata2 | 147
-rw-r--r--  debian/patches/series | 2
-rw-r--r--  debian/po/templates.pot | 8
-rw-r--r--  doc/examples/cloud-config-chef.txt | 70
-rw-r--r--  doc/rtd/topics/availability.rst | 1
-rw-r--r--  doc/rtd/topics/boot.rst | 2
-rw-r--r--  doc/rtd/topics/datasources.rst | 2
-rw-r--r--  doc/rtd/topics/datasources/azure.rst | 62
-rw-r--r--  doc/rtd/topics/datasources/vultr.rst | 35
-rw-r--r--  doc/rtd/topics/network-config.rst | 5
-rw-r--r--  doc/rtd/topics/vendordata.rst | 4
-rwxr-xr-x  systemd/cloud-init-generator.tmpl | 2
-rw-r--r--  systemd/cloud-init.service.tmpl | 2
-rw-r--r--  templates/chef_client.rb.tmpl | 2
-rw-r--r--  tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl | 504
-rw-r--r--  tests/integration_tests/bugs/test_lp1813396.py | 2
-rw-r--r--  tests/integration_tests/clouds.py | 8
-rw-r--r--  tests/integration_tests/log_utils.py | 11
-rw-r--r--  tests/integration_tests/modules/test_keys_to_console.py | 19
-rw-r--r--  tests/integration_tests/modules/test_power_state_change.py | 2
-rw-r--r--  tests/integration_tests/modules/test_set_hostname.py | 17
-rw-r--r--  tests/integration_tests/test_upgrade.py | 36
-rw-r--r--  tests/integration_tests/util.py | 49
-rw-r--r--  tests/unittests/test_cli.py | 2
-rw-r--r--  tests/unittests/test_datasource/test_azure.py | 341
-rw-r--r--  tests/unittests/test_datasource/test_azure_helper.py | 24
-rw-r--r--  tests/unittests/test_datasource/test_common.py | 2
-rw-r--r--  tests/unittests/test_datasource/test_vultr.py | 343
-rw-r--r--  tests/unittests/test_handler/test_handler_set_hostname.py | 69
-rw-r--r--  tests/unittests/test_net.py | 54
-rw-r--r--  tools/.github-cla-signers | 8
-rw-r--r--  tools/.lp-to-git-user | 1
-rwxr-xr-x  tools/ds-identify | 16
-rwxr-xr-x  tools/render-cloudcfg | 4
-rwxr-xr-x  tools/write-ssh-key-fingerprints | 58
72 files changed, 2429 insertions, 1191 deletions
diff --git a/.travis.yml b/.travis.yml
index 690ab644..e112789a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -121,16 +121,7 @@ matrix:
# Use sudo to get a new shell where we're in the sbuild group
- sudo -E su $USER -c 'sbuild --nolog --no-run-lintian --verbose --dist=xenial cloud-init_*.dsc'
- ssh-keygen -P "" -q -f ~/.ssh/id_rsa
- - sg lxd -c 'CLOUD_INIT_CLOUD_INIT_SOURCE="$(ls *.deb)" tox -e integration-tests-ci' &
- - |
- SECONDS=0
- while [ -e /proc/$! ]; do
- if [ "$SECONDS" -gt "570" ]; then
- echo -n '.'
- SECONDS=0
- fi
- sleep 10
- done
+ - sg lxd -c 'CLOUD_INIT_CLOUD_INIT_SOURCE="$(ls *.deb)" tox -e integration-tests-ci'
- python: 3.5
env:
TOXENV=xenial
diff --git a/ChangeLog b/ChangeLog
index 44b50410..98528249 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,66 @@
+21.2
+ - Add \r\n check for SSH keys in Azure (#889)
+ - Revert "Add support to resize rootfs if using LVM (#721)" (#887)
+ (LP: #1922742)
+ - Add Vultaire as contributor (#881) [Paul Goins]
+ - Azure: adding support for consuming userdata from IMDS (#884) [Anh Vo]
+ - test_upgrade: modify test_upgrade_package to run for more sources (#883)
+ - Fix chef module run failure when chef_license is set (#868) [Ben Hughes]
+ - Azure: Retry net metadata during nic attach for non-timeout errs (#878)
+ [aswinrajamannar]
+ - Azure: Retrieve username and hostname from IMDS (#865) [Thomas Stringer]
+ - Azure: eject the provisioning iso before reporting ready (#861) [Anh Vo]
+ - Use `partprobe` to re-read partition table if available (#856)
+ [Nicolas Bock] (LP: #1920939)
+ - fix error on upgrade caused by new vendordata2 attributes (#869)
+ (LP: #1922739)
+ - add prefer_fqdn_over_hostname config option (#859)
+ [hamalq] (LP: #1921004)
+ - Emit dots on travis to avoid timeout (#867)
+ - doc: Replace remaining references to user-scripts as a config module
+ (#866) [Ryan Harper]
+ - azure: Removing ability to invoke walinuxagent (#799) [Anh Vo]
+ - Add Vultr support (#827) [David Dymko]
+ - Fix unpickle for source paths missing run_dir (#863)
+ [lucasmoura] (LP: #1899299)
+ - sysconfig: use BONDING_MODULE_OPTS on SUSE (#831) [Jens Sandmann]
+ - bringup_static_routes: fix gateway check (#850) [Petr Fedchenkov]
+ - add hamalq user (#860) [hamalq]
+ - Add support to resize rootfs if using LVM (#721)
+ [Eduardo Otubo] (LP: #1799953)
+ - Fix mis-detecting network configuration in initramfs cmdline (#844)
+ (LP: #1919188)
+ - tools/write-ssh-key-fingerprints: do not display empty header/footer
+ (#817) [dermotbradley]
+ - Azure helper: Ensure Azure http handler sleeps between retries (#842)
+ [Johnson Shi]
+ - Fix chef apt source example (#826) [timothegenzmer]
+ - .travis.yml: generate an SSH key before running tests (#848)
+ - write passwords only to serial console, lock down cloud-init-output.log
+ (#847) (LP: #1918303)
+ - Fix apt default integration test (#845)
+ - integration_tests: bump pycloudlib dependency (#846)
+ - Fix stack trace if vendordata_raw contained an array (#837) [eb3095]
+ - archlinux: Fix broken locale logic (#841)
+ [Kristian Klausen] (LP: #1402406)
+ - Integration test for #783 (#832)
+ - integration_tests: mount more paths IN_PLACE (#838)
+ - Fix requiring device-number on EC2 derivatives (#836) (LP: #1917875)
+ - Remove the vi comment from the part-handler example (#835)
+ - net: exclude OVS internal interfaces in get_interfaces (#829)
+ (LP: #1912844)
+ - tox.ini: pass OS_* environment variables to integration tests (#830)
+ - integration_tests: add OpenStack as a platform (#804)
+ - Add flexibility to IMDS api-version (#793) [Thomas Stringer]
+ - Fix the TestApt tests using apt-key on Xenial and Hirsute (#823)
+ [Paride Legovini] (LP: #1916629)
+ - doc: remove duplicate "it" from nocloud.rst (#825) [V.I. Wood]
+ - archlinux: Use hostnamectl to set the transient hostname (#797)
+ [Kristian Klausen]
+ - cc_keys_to_console.py: Add documentation for recently added config key
+ (#824) [dermotbradley]
+ - Update cc_set_hostname documentation (#818) [Toshi Aoyama]
+
21.1
- Azure: Support for VMs without ephemeral resource disks. (#800)
[Johnson Shi] (LP: #1901011)
diff --git a/README.md b/README.md
index 435405da..01fd3b07 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,7 @@ get in contact with that distribution and send them our way!
| Supported OSes | Supported Public Clouds | Supported Private Clouds |
| --- | --- | --- |
-| Alpine Linux<br />ArchLinux<br />Debian<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
+| Alpine Linux<br />ArchLinux<br />Debian<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
## To start developing cloud-init
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index 25f254e3..aadc638f 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -41,6 +41,7 @@ KNOWN_CLOUD_NAMES = [
'SmartOS',
'UpCloud',
'VMware',
+ 'Vultr',
'ZStack',
'Other'
]
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index aaf71366..7b20222e 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -70,7 +70,6 @@ CHEF_RB_TPL_PATH_KEYS = frozenset([
'json_attribs',
'pid_file',
'encrypted_data_bag_secret',
- 'chef_license',
])
CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys())
CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS)
@@ -80,6 +79,7 @@ CHEF_RB_TPL_KEYS.extend([
'node_name',
'environment',
'validation_name',
+ 'chef_license',
])
CHEF_RB_TPL_KEYS = frozenset(CHEF_RB_TPL_KEYS)
CHEF_RB_PATH = '/etc/chef/client.rb'
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index d1200694..22af3813 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -107,12 +107,12 @@ import shlex
frequency = PER_INSTANCE
# Define the commands to use
-UDEVADM_CMD = subp.which('udevadm')
SFDISK_CMD = subp.which("sfdisk")
SGDISK_CMD = subp.which("sgdisk")
LSBLK_CMD = subp.which("lsblk")
BLKID_CMD = subp.which("blkid")
BLKDEV_CMD = subp.which("blockdev")
+PARTPROBE_CMD = subp.which("partprobe")
WIPEFS_CMD = subp.which("wipefs")
LANG_C_ENV = {'LANG': 'C'}
@@ -685,13 +685,16 @@ def get_partition_layout(table_type, size, layout):
def read_parttbl(device):
"""
- Use partprobe instead of 'udevadm'. Partprobe is the only
- reliable way to probe the partition table.
+    `partprobe` is preferred over `blockdev --rereadpt` since it
+    probes the partition table more reliably.
"""
- blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
+ if PARTPROBE_CMD is not None:
+ probe_cmd = [PARTPROBE_CMD, device]
+ else:
+ probe_cmd = [BLKDEV_CMD, '--rereadpt', device]
util.udevadm_settle()
try:
- subp.subp(blkdev_cmd)
+ subp.subp(probe_cmd)
except Exception as e:
util.logexc(LOG, "Failed reading the partition table %s" % e)
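
The probing change above reduces to a small fallback: use partprobe when it is on PATH, otherwise fall back to blockdev --rereadpt. A minimal standalone sketch of the same behavior, using the standard library in place of cloud-init's subp helpers:

import shutil
import subprocess

def read_parttbl(device):
    # Prefer partprobe; blockdev --rereadpt is the fallback when
    # partprobe is not installed.
    partprobe = shutil.which("partprobe")
    if partprobe:
        probe_cmd = [partprobe, device]
    else:
        probe_cmd = ["blockdev", "--rereadpt", device]
    try:
        subprocess.run(probe_cmd, check=True)
    except (OSError, subprocess.CalledProcessError) as e:
        print("Failed reading the partition table %s" % e)
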
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index e183993f..41c278ff 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -24,8 +24,8 @@ LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
NTP_CONF = '/etc/ntp.conf'
NR_POOL_SERVERS = 4
-distros = ['alpine', 'centos', 'debian', 'fedora', 'opensuse', 'rhel',
- 'sles', 'ubuntu']
+distros = ['almalinux', 'alpine', 'centos', 'debian', 'fedora', 'opensuse',
+ 'rhel', 'sles', 'ubuntu']
NTP_CLIENT_CONFIG = {
'chrony': {
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index a5aca038..c75dc57d 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -44,7 +44,7 @@ user scripts configuration directory, to be run later by ``cc_scripts_user``.
# - read the blob of data from raw user data, and parse it as key/value
# - for each key that is found, download the content to
# the local instance/scripts directory and set them executable.
-# - the files in that directory will be run by the user-scripts module
+# - the files in that directory will be run by the scripts-user module
# Therefore, this must run before that.
#
#
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index d4017478..5a59dc32 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -19,7 +19,10 @@ A hostname and fqdn can be provided by specifying a full domain name under the
key, and the fqdn of the cloud will be used. If a fqdn is specified with the
``hostname`` key, it will be handled properly, although it is better to use
the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set,
-it is distro dependent whether ``hostname`` or ``fqdn`` is used.
+it is distro dependent whether ``hostname`` or ``fqdn`` is used,
+unless the ``prefer_fqdn_over_hostname`` option is set: when true and
+``fqdn`` is set, the fqdn is used on all distros; when false, the
+hostname is used.
This module will run in the init-local stage before networking is configured
if the hostname is set by metadata or user data on the local system.
@@ -38,6 +41,7 @@ based on initial hostname.
**Config keys**::
preserve_hostname: <true/false>
+ prefer_fqdn_over_hostname: <true/false>
fqdn: <fqdn>
hostname: <fqdn/hostname>
"""
@@ -62,6 +66,14 @@ def handle(name, cfg, cloud, log, _args):
log.debug(("Configuration option 'preserve_hostname' is set,"
" not setting the hostname in module %s"), name)
return
+
+ # Set prefer_fqdn_over_hostname value in distro
+ hostname_fqdn = util.get_cfg_option_bool(cfg,
+ "prefer_fqdn_over_hostname",
+ None)
+ if hostname_fqdn is not None:
+ cloud.distro.set_option('prefer_fqdn_over_hostname', hostname_fqdn)
+
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
# Check for previous successful invocation of set-hostname
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index d5f4eb5a..f4120356 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -27,6 +27,7 @@ is set, then the hostname will not be altered.
**Config keys**::
preserve_hostname: <true/false>
+ prefer_fqdn_over_hostname: <true/false>
fqdn: <fqdn>
hostname: <fqdn/hostname>
"""
@@ -45,6 +46,13 @@ def handle(name, cfg, cloud, log, _args):
" not updating the hostname in module %s"), name)
return
+ # Set prefer_fqdn_over_hostname value in distro
+ hostname_fqdn = util.get_cfg_option_bool(cfg,
+ "prefer_fqdn_over_hostname",
+ None)
+ if hostname_fqdn is not None:
+ cloud.distro.set_option('prefer_fqdn_over_hostname', hostname_fqdn)
+
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
try:
prev_fn = os.path.join(cloud.get_cpath('data'), "previous-hostname")
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 01fe683c..db513ed7 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -18,7 +18,7 @@ entry, the config entry will be skipped.
**Module frequency:** per always
-**Supported distros:** centos, fedora, rhel
+**Supported distros:** almalinux, centos, fedora, rhel
**Config keys**::
@@ -36,7 +36,7 @@ from configparser import ConfigParser
from cloudinit import util
-distros = ['centos', 'fedora', 'rhel']
+distros = ['almalinux', 'centos', 'fedora', 'rhel']
def _canonicalize_id(repo_id):
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 220bd11f..107b928c 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -46,7 +46,7 @@ OSFAMILIES = {
'debian': ['debian', 'ubuntu'],
'freebsd': ['freebsd'],
'gentoo': ['gentoo'],
- 'redhat': ['amazon', 'centos', 'fedora', 'rhel'],
+ 'redhat': ['almalinux', 'amazon', 'centos', 'fedora', 'rhel'],
'suse': ['opensuse', 'sles'],
}
@@ -79,6 +79,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
shutdown_options_map = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}
_ci_pkl_version = 1
+ prefer_fqdn = False
def __init__(self, name, cfg, paths):
self._paths = paths
@@ -131,6 +132,9 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
def get_option(self, opt_name, default=None):
return self._cfg.get(opt_name, default)
+ def set_option(self, opt_name, value=None):
+ self._cfg[opt_name] = value
+
def set_hostname(self, hostname, fqdn=None):
writeable_hostname = self._select_hostname(hostname, fqdn)
self._write_hostname(writeable_hostname, self.hostname_conf_fn)
@@ -259,6 +263,9 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
def _select_hostname(self, hostname, fqdn):
# Prefer the short hostname over the long
# fully qualified domain name
+ if util.get_cfg_option_bool(self._cfg, "prefer_fqdn_over_hostname",
+ self.prefer_fqdn) and fqdn:
+ return fqdn
if not hostname:
return fqdn
return hostname
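
Combined with the set_option() calls added to cc_set_hostname and cc_update_hostname, hostname selection now follows a fixed order: the explicit prefer_fqdn_over_hostname config value if present, else the distro default, else the short name. A self-contained sketch of that order (distro_default stands in for the new class attribute, e.g. True on FreeBSD and RHEL after this change):

def select_hostname(hostname, fqdn, prefer_fqdn_over_hostname=None,
                    distro_default=False):
    # Explicit config wins; otherwise fall back to the distro default.
    prefer_fqdn = (distro_default if prefer_fqdn_over_hostname is None
                   else prefer_fqdn_over_hostname)
    if prefer_fqdn and fqdn:
        return fqdn
    if not hostname:
        return fqdn
    return hostname

assert select_hostname("host", "host.example.com",
                       prefer_fqdn_over_hostname=True) == "host.example.com"
assert select_hostname("host", "host.example.com",
                       distro_default=False) == "host"
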
diff --git a/cloudinit/distros/almalinux.py b/cloudinit/distros/almalinux.py
new file mode 100644
index 00000000..edb3165d
--- /dev/null
+++ b/cloudinit/distros/almalinux.py
@@ -0,0 +1,9 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import rhel
+
+
+class Distro(rhel.Distro):
+ pass
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index dde34d41..9659843f 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -27,12 +27,7 @@ class Distro(cloudinit.distros.bsd.BSD):
pkg_cmd_remove_prefix = ["pkg", "remove"]
pkg_cmd_update_prefix = ["pkg", "update"]
pkg_cmd_upgrade_prefix = ["pkg", "upgrade"]
-
- def _select_hostname(self, hostname, fqdn):
- # Should be FQDN if available. See rc.conf(5) in FreeBSD
- if fqdn:
- return fqdn
- return hostname
+ prefer_fqdn = True # See rc.conf(5) in FreeBSD
def _get_add_member_to_group_cmd(self, member_name, group_name):
return ['pw', 'usermod', '-n', member_name, '-G', group_name]
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index c72f7c17..0c00a531 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -50,6 +50,10 @@ class Distro(distros.Distro):
}
}
+ # Should be fqdn if we can use it
+ # See: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/5/html/deployment_guide/ch-sysconfig # noqa: E501
+ prefer_fqdn = True
+
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
# This will be used to restrict certain
@@ -91,13 +95,6 @@ class Distro(distros.Distro):
}
rhel_util.update_sysconfig_file(out_fn, host_cfg)
- def _select_hostname(self, hostname, fqdn):
- # Should be fqdn if we can use it
- # See: https://www.centos.org/docs/5/html/Deployment_Guide-en-US/ch-sysconfig.html#s2-sysconfig-network # noqa
- if fqdn:
- return fqdn
- return hostname
-
def _read_system_hostname(self):
if self.uses_systemd():
host_fn = self.systemd_hostname_conf_fn
diff --git a/cloudinit/distros/tests/test_init.py b/cloudinit/distros/tests/test_init.py
index db534654..fd64a322 100644
--- a/cloudinit/distros/tests/test_init.py
+++ b/cloudinit/distros/tests/test_init.py
@@ -11,10 +11,15 @@ import pytest
from cloudinit.distros import _get_package_mirror_info, LDH_ASCII_CHARS
+# In newer versions of Python, these characters will be omitted instead
+# of substituted because of security concerns.
+# See https://bugs.python.org/issue43882
+SECURITY_URL_CHARS = '\n\r\t'
# Define a set of characters we would expect to be replaced
INVALID_URL_CHARS = [
- chr(x) for x in range(127) if chr(x) not in LDH_ASCII_CHARS
+ chr(x) for x in range(127)
+ if chr(x) not in LDH_ASCII_CHARS + SECURITY_URL_CHARS
]
for separator in [":", ".", "/", "#", "?", "@", "[", "]"]:
# Remove from the set characters that either separate hostname parts (":",
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index fc5011ec..b8f9d2c3 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -20,6 +20,7 @@ from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
from cloudinit import log as logging
from cloudinit import type_utils
+from cloudinit import persistence
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -317,7 +318,9 @@ class ContentHandlers(object):
return list(self.registered.items())
-class Paths(object):
+class Paths(persistence.CloudInitPickleMixin):
+ _ci_pkl_version = 1
+
def __init__(self, path_cfgs, ds=None):
self.cfgs = path_cfgs
# Populate all the initial paths
@@ -354,6 +357,18 @@ class Paths(object):
# Set when a datasource becomes active
self.datasource = ds
+ def _unpickle(self, ci_pkl_version: int) -> None:
+ """Perform deserialization fixes for Paths."""
+ if not hasattr(self, "run_dir"):
+            # Older versions of cloud-init did not have the run_dir
+            # attribute on the Paths class. This is problematic because
+            # newer versions of cloud-init rely on that attribute when
+            # loading the pickle object, so we manually add it here.
+ self.run_dir = Paths(
+ path_cfgs=self.cfgs,
+ ds=self.datasource).run_dir
+
# get_ipath_cur: get the current instance path for an item
def get_ipath_cur(self, name=None):
return self._get_path(self.instance_link, name)
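
The fix relies on the versioned-unpickle hook that CloudInitPickleMixin provides: __setstate__ restores the instance and then gives the class a chance to backfill attributes that older pickles lack. A reduced sketch of that pattern (the real mixin lives in cloudinit/persistence.py):

class PickleFixupMixin:
    _ci_pkl_version = 0

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_ci_pkl_version"] = self._ci_pkl_version
        return state

    def __setstate__(self, state):
        version = state.pop("_ci_pkl_version", 0)
        self.__dict__.update(state)
        # Let subclasses repair objects serialized by older releases.
        self._unpickle(version)

    def _unpickle(self, ci_pkl_version):
        pass
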
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 385b7bcc..6b3b84f7 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -1135,7 +1135,7 @@ class EphemeralIPv4Network(object):
# ("0.0.0.0/0", "130.56.240.1")]
for net_address, gateway in self.static_routes:
via_arg = []
- if gateway != "0.0.0.0/0":
+ if gateway != "0.0.0.0":
via_arg = ['via', gateway]
subp.subp(
['ip', '-4', 'route', 'add', net_address] + via_arg +
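
The comparison change matters because parse_static_routes yields an unspecified gateway as "0.0.0.0" (the old test compared against the destination form "0.0.0.0/0", which never matches a gateway), and such on-link routes must be installed without a via hop. A small sketch of the resulting command construction; eth0 here is illustrative:

def build_route_add(net_address, gateway, ifname="eth0"):
    # On-link routes (gateway 0.0.0.0) get no 'via' argument.
    via_arg = [] if gateway == "0.0.0.0" else ["via", gateway]
    return ["ip", "-4", "route", "add", net_address] + via_arg + \
        ["dev", ifname]

assert build_route_add("169.254.169.254/32", "0.0.0.0") == \
    ["ip", "-4", "route", "add", "169.254.169.254/32", "dev", "eth0"]
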
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index cc8dc17b..7cdd428d 100755
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
@@ -12,6 +12,7 @@ import gzip
import io
import logging
import os
+import shlex
from cloudinit import util
@@ -72,8 +73,9 @@ class KlibcNetworkConfigSource(InitramfsNetworkConfigSource):
(ii) an open-iscsi interface file is present in the system
"""
if self._files:
- if 'ip=' in self._cmdline or 'ip6=' in self._cmdline:
- return True
+ for item in shlex.split(self._cmdline):
+ if item.startswith('ip=') or item.startswith('ip6='):
+ return True
if os.path.exists(_OPEN_ISCSI_INTERFACE_FILE):
# iBft can configure networking without ip=
return True
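
Tokenizing with shlex fixes false positives from the old substring test: any parameter that merely contained "ip=" used to count as initramfs network configuration. A quick illustration with a made-up kernel cmdline:

import shlex

cmdline = 'BOOT_IMAGE=/vmlinuz root=/dev/sda1 something_ip=10.0.0.1 quiet'

# Old check: substring match fires on "something_ip=10.0.0.1".
assert 'ip=' in cmdline

# New check: only whole parameters starting with ip= or ip6= count.
assert not any(tok.startswith('ip=') or tok.startswith('ip6=')
               for tok in shlex.split(cmdline))
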
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 99a4bae4..089b44b2 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -19,7 +19,7 @@ from .network_state import (
LOG = logging.getLogger(__name__)
NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf"
-KNOWN_DISTROS = ['centos', 'fedora', 'rhel', 'suse']
+KNOWN_DISTROS = ['almalinux', 'centos', 'fedora', 'rhel', 'suse']
def _make_header(sep='#'):
@@ -313,7 +313,8 @@ class Renderer(renderer.Renderer):
}
# If these keys exist, then their values will be used to form
- # a BONDING_OPTS grouping; otherwise no grouping will be set.
+ # a BONDING_OPTS / BONDING_MODULE_OPTS grouping; otherwise no
+ # grouping will be set.
bond_tpl_opts = tuple([
('bond_mode', "mode=%s"),
('bond_xmit_hash_policy', "xmit_hash_policy=%s"),
@@ -622,7 +623,7 @@ class Renderer(renderer.Renderer):
route_cfg[new_key] = route[old_key]
@classmethod
- def _render_bonding_opts(cls, iface_cfg, iface):
+ def _render_bonding_opts(cls, iface_cfg, iface, flavor):
bond_opts = []
for (bond_key, value_tpl) in cls.bond_tpl_opts:
# Seems like either dash or underscore is possible?
@@ -635,7 +636,18 @@ class Renderer(renderer.Renderer):
bond_opts.append(value_tpl % (bond_value))
break
if bond_opts:
- iface_cfg['BONDING_OPTS'] = " ".join(bond_opts)
+ if flavor == 'suse':
+ # suse uses the sysconfig support which requires
+ # BONDING_MODULE_OPTS see
+ # https://www.kernel.org/doc/Documentation/networking/bonding.txt
+ # 3.1 Configuration with Sysconfig Support
+ iface_cfg['BONDING_MODULE_OPTS'] = " ".join(bond_opts)
+ else:
+ # rhel uses initscript support and thus requires BONDING_OPTS
+ # this is also the old default see
+ # https://www.kernel.org/doc/Documentation/networking/bonding.txt
+ # 3.2 Configuration with Initscripts Support
+ iface_cfg['BONDING_OPTS'] = " ".join(bond_opts)
@classmethod
def _render_physical_interfaces(
@@ -663,7 +675,7 @@ class Renderer(renderer.Renderer):
for iface in network_state.iter_interfaces(bond_filter):
iface_name = iface['name']
iface_cfg = iface_contents[iface_name]
- cls._render_bonding_opts(iface_cfg, iface)
+ cls._render_bonding_opts(iface_cfg, iface, flavor)
# Ensure that the master interface (and any of its children)
# are actually marked as being bond types...
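
The flavor switch only changes which sysconfig key receives the already-assembled options string. A rough sketch of that dispatch, assuming bond_opts has been rendered as above:

def apply_bonding_opts(iface_cfg, bond_opts, flavor):
    # SUSE's sysconfig support reads BONDING_MODULE_OPTS; the
    # initscripts-based flavors (e.g. rhel) keep BONDING_OPTS.
    key = 'BONDING_MODULE_OPTS' if flavor == 'suse' else 'BONDING_OPTS'
    iface_cfg[key] = ' '.join(bond_opts)

cfg = {}
apply_bonding_opts(cfg, ['mode=802.3ad', 'miimon=100'], 'suse')
assert cfg == {'BONDING_MODULE_OPTS': 'mode=802.3ad miimon=100'}
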
diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py
index 74cf4b94..6f9a02de 100644
--- a/cloudinit/net/tests/test_dhcp.py
+++ b/cloudinit/net/tests/test_dhcp.py
@@ -194,6 +194,11 @@ class TestDHCPParseStaticRoutes(CiTestCase):
self.assertEqual([('0.0.0.0/0', '130.56.240.1')],
parse_static_routes(rfc3442))
+ def test_unspecified_gateway(self):
+ rfc3442 = "32,169,254,169,254,0,0,0,0"
+ self.assertEqual([('169.254.169.254/32', '0.0.0.0')],
+ parse_static_routes(rfc3442))
+
def test_parse_static_routes_class_c_b_a(self):
class_c = "24,192,168,74,192,168,0,4"
class_b = "16,172,16,172,16,0,4"
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index 946f8ee2..ad9c90ff 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -706,19 +706,23 @@ class TestEphemeralIPV4Network(CiTestCase):
def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp):
params = {
'interface': 'eth0', 'ip': '192.168.2.2',
- 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255',
- 'static_routes': [('169.254.169.254/32', '192.168.2.1'),
+ 'prefix_or_mask': '255.255.255.255', 'broadcast': '192.168.2.255',
+ 'static_routes': [('192.168.2.1/32', '0.0.0.0'),
+ ('169.254.169.254/32', '192.168.2.1'),
('0.0.0.0/0', '192.168.2.1')],
'router': '192.168.2.1'}
expected_setup_calls = [
mock.call(
- ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
+ ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/32',
'broadcast', '192.168.2.255', 'dev', 'eth0'],
capture=True, update_env={'LANG': 'C'}),
mock.call(
['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'],
capture=True),
mock.call(
+ ['ip', '-4', 'route', 'add', '192.168.2.1/32',
+ 'dev', 'eth0'], capture=True),
+ mock.call(
['ip', '-4', 'route', 'add', '169.254.169.254/32',
'via', '192.168.2.1', 'dev', 'eth0'], capture=True),
mock.call(
@@ -732,11 +736,14 @@ class TestEphemeralIPV4Network(CiTestCase):
['ip', '-4', 'route', 'del', '169.254.169.254/32',
'via', '192.168.2.1', 'dev', 'eth0'], capture=True),
mock.call(
+ ['ip', '-4', 'route', 'del', '192.168.2.1/32',
+ 'dev', 'eth0'], capture=True),
+ mock.call(
['ip', '-family', 'inet', 'link', 'set', 'dev',
'eth0', 'down'], capture=True),
mock.call(
['ip', '-family', 'inet', 'addr', 'del',
- '192.168.2.2/24', 'dev', 'eth0'], capture=True)
+ '192.168.2.2/32', 'dev', 'eth0'], capture=True)
]
with net.EphemeralIPv4Network(**params):
self.assertEqual(expected_setup_calls, m_subp.call_args_list)
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 91e1bfe7..23e4c0ad 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -30,6 +30,7 @@ CFG_BUILTIN = {
'GCE',
'OpenStack',
'AliYun',
+ 'Vultr',
'Ec2',
'CloudSigma',
'CloudStack',
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 6cae9e82..2f3390c3 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -5,6 +5,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import base64
+from collections import namedtuple
import contextlib
import crypt
from functools import partial
@@ -16,6 +17,7 @@ from time import sleep
from xml.dom import minidom
import xml.etree.ElementTree as ET
from enum import Enum
+import requests
from cloudinit import dmi
from cloudinit import log as logging
@@ -25,6 +27,7 @@ from cloudinit.net import device_driver
from cloudinit.net.dhcp import EphemeralDHCPv4
from cloudinit import sources
from cloudinit.sources.helpers import netlink
+from cloudinit import ssh_util
from cloudinit import subp
from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
from cloudinit import util
@@ -80,7 +83,12 @@ AGENT_SEED_DIR = '/var/lib/waagent'
IMDS_TIMEOUT_IN_SECONDS = 2
IMDS_URL = "http://169.254.169.254/metadata"
IMDS_VER_MIN = "2019-06-01"
-IMDS_VER_WANT = "2020-09-01"
+IMDS_VER_WANT = "2021-01-01"
+
+
+# This holds the SSH keys themselves plus a flag recording
+# whether they came from IMDS.
+SSHKeys = namedtuple("SSHKeys", ("keys_from_imds", "ssh_keys"))
class metadata_type(Enum):
@@ -332,6 +340,7 @@ class DataSourceAzure(sources.DataSource):
dsname = 'Azure'
_negotiated = False
_metadata_imds = sources.UNSET
+ _ci_pkl_version = 1
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -346,8 +355,13 @@ class DataSourceAzure(sources.DataSource):
# Regenerate network config new_instance boot and every boot
self.update_events['network'].add(EventType.BOOT)
self._ephemeral_dhcp_ctx = None
-
self.failed_desired_api_version = False
+ self.iso_dev = None
+
+ def _unpickle(self, ci_pkl_version: int) -> None:
+ super()._unpickle(ci_pkl_version)
+ if "iso_dev" not in self.__dict__:
+ self.iso_dev = None
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -381,57 +395,12 @@ class DataSourceAzure(sources.DataSource):
util.logexc(LOG, "handling set_hostname failed")
return False
- @azure_ds_telemetry_reporter
- def get_metadata_from_agent(self):
- temp_hostname = self.metadata.get('local-hostname')
- agent_cmd = self.ds_cfg['agent_command']
- LOG.debug("Getting metadata via agent. hostname=%s cmd=%s",
- temp_hostname, agent_cmd)
-
- self.bounce_network_with_azure_hostname()
-
- try:
- invoke_agent(agent_cmd)
- except subp.ProcessExecutionError:
- # claim the datasource even if the command failed
- util.logexc(LOG, "agent command '%s' failed.",
- self.ds_cfg['agent_command'])
-
- ddir = self.ds_cfg['data_dir']
-
- fp_files = []
- key_value = None
- for pk in self.cfg.get('_pubkeys', []):
- if pk.get('value', None):
- key_value = pk['value']
- LOG.debug("SSH authentication: using value from fabric")
- else:
- bname = str(pk['fingerprint'] + ".crt")
- fp_files += [os.path.join(ddir, bname)]
- LOG.debug("SSH authentication: "
- "using fingerprint from fabric")
-
- with events.ReportEventStack(
- name="waiting-for-ssh-public-key",
- description="wait for agents to retrieve SSH keys",
- parent=azure_ds_reporter):
- # wait very long for public SSH keys to arrive
- # https://bugs.launchpad.net/cloud-init/+bug/1717611
- missing = util.log_time(logfunc=LOG.debug,
- msg="waiting for SSH public key files",
- func=util.wait_for_files,
- args=(fp_files, 900))
- if len(missing):
- LOG.warning("Did not find files, but going on: %s", missing)
-
- metadata = {}
- metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
- return metadata
-
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
if self.seed.startswith('/dev'):
subplatform_type = 'config-disk'
+ elif self.seed.lower() == 'imds':
+ subplatform_type = 'imds'
else:
subplatform_type = 'seed-dir'
return '%s (%s)' % (subplatform_type, self.seed)
@@ -474,9 +443,11 @@ class DataSourceAzure(sources.DataSource):
found = None
reprovision = False
+ ovf_is_accessible = True
reprovision_after_nic_attach = False
for cdev in candidates:
try:
+ LOG.debug("cdev: %s", cdev)
if cdev == "IMDS":
ret = None
reprovision = True
@@ -503,8 +474,25 @@ class DataSourceAzure(sources.DataSource):
raise sources.InvalidMetaDataException(msg)
except util.MountFailedError:
report_diagnostic_event(
- '%s was not mountable' % cdev, logger_func=LOG.warning)
- continue
+ '%s was not mountable' % cdev, logger_func=LOG.debug)
+ cdev = 'IMDS'
+ ovf_is_accessible = False
+ empty_md = {'local-hostname': ''}
+ empty_cfg = dict(
+ system_info=dict(
+ default_user=dict(
+ name=''
+ )
+ )
+ )
+ ret = (empty_md, '', empty_cfg, {})
+
+ report_diagnostic_event("Found provisioning metadata in %s" % cdev,
+ logger_func=LOG.debug)
+
+ # save the iso device for ejection before reporting ready
+ if cdev.startswith("/dev"):
+ self.iso_dev = cdev
perform_reprovision = reprovision or self._should_reprovision(ret)
perform_reprovision_after_nic_attach = (
@@ -524,6 +512,10 @@ class DataSourceAzure(sources.DataSource):
self.fallback_interface,
retries=10
)
+ if not imds_md and not ovf_is_accessible:
+ msg = 'No OVF or IMDS available'
+ report_diagnostic_event(msg)
+ raise sources.InvalidMetaDataException(msg)
(md, userdata_raw, cfg, files) = ret
self.seed = cdev
crawled_data.update({
@@ -532,6 +524,35 @@ class DataSourceAzure(sources.DataSource):
'metadata': util.mergemanydict(
[md, {'imds': imds_md}]),
'userdata_raw': userdata_raw})
+ imds_username = _username_from_imds(imds_md)
+ imds_hostname = _hostname_from_imds(imds_md)
+ imds_disable_password = _disable_password_from_imds(imds_md)
+ if imds_username:
+ LOG.debug('Username retrieved from IMDS: %s', imds_username)
+ cfg['system_info']['default_user']['name'] = imds_username
+ if imds_hostname:
+ LOG.debug('Hostname retrieved from IMDS: %s', imds_hostname)
+ crawled_data['metadata']['local-hostname'] = imds_hostname
+ if imds_disable_password:
+ LOG.debug(
+ 'Disable password retrieved from IMDS: %s',
+ imds_disable_password
+ )
+ crawled_data['metadata']['disable_password'] = imds_disable_password # noqa: E501
+
+ # only use userdata from imds if OVF did not provide custom data
+ # userdata provided by IMDS is always base64 encoded
+ if not userdata_raw:
+ imds_userdata = _userdata_from_imds(imds_md)
+ if imds_userdata:
+ LOG.debug("Retrieved userdata from IMDS")
+ try:
+ crawled_data['userdata_raw'] = base64.b64decode(
+ ''.join(imds_userdata.split()))
+ except Exception:
+ report_diagnostic_event(
+ "Bad userdata in IMDS",
+ logger_func=LOG.warning)
found = cdev
report_diagnostic_event(
@@ -659,7 +680,9 @@ class DataSourceAzure(sources.DataSource):
self,
fallback_nic,
retries,
- md_type=metadata_type.compute):
+ md_type=metadata_type.compute,
+ exc_cb=retry_on_url_exc,
+ infinite=False):
"""
Wrapper for get_metadata_from_imds so that we can have flexibility
in which IMDS api-version we use. If a particular instance of IMDS
@@ -679,7 +702,8 @@ class DataSourceAzure(sources.DataSource):
fallback_nic=fallback_nic,
retries=0,
md_type=md_type,
- api_version=IMDS_VER_WANT
+ api_version=IMDS_VER_WANT,
+ exc_cb=exc_cb
)
except UrlError as err:
LOG.info(
@@ -702,7 +726,9 @@ class DataSourceAzure(sources.DataSource):
fallback_nic=fallback_nic,
retries=retries,
md_type=md_type,
- api_version=IMDS_VER_MIN
+ api_version=IMDS_VER_MIN,
+ exc_cb=exc_cb,
+ infinite=infinite
)
def device_name_to_device(self, name):
@@ -711,6 +737,13 @@ class DataSourceAzure(sources.DataSource):
@azure_ds_telemetry_reporter
def get_public_ssh_keys(self):
"""
+ Retrieve public SSH keys.
+ """
+
+ return self._get_public_ssh_keys_and_source().ssh_keys
+
+ def _get_public_ssh_keys_and_source(self):
+ """
Try to get the ssh keys from IMDS first, and if that fails
    (i.e. IMDS is unavailable) then fall back to getting the ssh
keys from OVF.
@@ -719,30 +752,50 @@ class DataSourceAzure(sources.DataSource):
advantage, so this is a strong preference. But we must keep
OVF as a second option for environments that don't have IMDS.
"""
+
LOG.debug('Retrieving public SSH keys')
ssh_keys = []
+ keys_from_imds = True
+ LOG.debug('Attempting to get SSH keys from IMDS')
try:
- raise KeyError(
- "Not using public SSH keys from IMDS"
- )
- # pylint:disable=unreachable
ssh_keys = [
public_key['keyData']
for public_key
in self.metadata['imds']['compute']['publicKeys']
]
- LOG.debug('Retrieved SSH keys from IMDS')
+ for key in ssh_keys:
+ if not _key_is_openssh_formatted(key=key):
+ keys_from_imds = False
+ break
+
+ if not keys_from_imds:
+ log_msg = 'Keys not in OpenSSH format, using OVF'
+ else:
+ log_msg = 'Retrieved {} keys from IMDS'.format(
+ len(ssh_keys)
+ if ssh_keys is not None
+ else 0
+ )
except KeyError:
log_msg = 'Unable to get keys from IMDS, falling back to OVF'
+ keys_from_imds = False
+ finally:
report_diagnostic_event(log_msg, logger_func=LOG.debug)
+
+ if not keys_from_imds:
+ LOG.debug('Attempting to get SSH keys from OVF')
try:
ssh_keys = self.metadata['public-keys']
- LOG.debug('Retrieved keys from OVF')
+ log_msg = 'Retrieved {} keys from OVF'.format(len(ssh_keys))
except KeyError:
log_msg = 'No keys available from OVF'
+ finally:
report_diagnostic_event(log_msg, logger_func=LOG.debug)
- return ssh_keys
+ return SSHKeys(
+ keys_from_imds=keys_from_imds,
+ ssh_keys=ssh_keys
+ )
def get_config_obj(self):
return self.cfg
@@ -905,6 +958,9 @@ class DataSourceAzure(sources.DataSource):
is_primary = False
expected_nic_count = -1
imds_md = None
+ metadata_poll_count = 0
+ metadata_logging_threshold = 1
+ metadata_timeout_count = 0
# For now, only a VM's primary NIC can contact IMDS and WireServer. If
# DHCP fails for a NIC, we have no mechanism to determine if the NIC is
@@ -929,14 +985,48 @@ class DataSourceAzure(sources.DataSource):
% (ifname, e), logger_func=LOG.error)
raise
+ # Retry polling network metadata for a limited duration only when the
+ # calls fail due to timeout. This is because the platform drops packets
+ # going towards IMDS when it is not a primary nic. If the calls fail
+ # due to other issues like 410, 503 etc, then it means we are primary
+ # but IMDS service is unavailable at the moment. Retry indefinitely in
+ # those cases since we cannot move on without the network metadata.
+ def network_metadata_exc_cb(msg, exc):
+ nonlocal metadata_timeout_count, metadata_poll_count
+ nonlocal metadata_logging_threshold
+
+ metadata_poll_count = metadata_poll_count + 1
+
+        # Log when needed but back off exponentially to avoid flooding
+        # the log file.
+ if metadata_poll_count >= metadata_logging_threshold:
+ metadata_logging_threshold *= 2
+ report_diagnostic_event(
+ "Ran into exception when attempting to reach %s "
+ "after %d polls." % (msg, metadata_poll_count),
+ logger_func=LOG.error)
+
+ if isinstance(exc, UrlError):
+ report_diagnostic_event("poll IMDS with %s failed. "
+ "Exception: %s and code: %s" %
+ (msg, exc.cause, exc.code),
+ logger_func=LOG.error)
+
+ if exc.cause and isinstance(exc.cause, requests.Timeout):
+ metadata_timeout_count = metadata_timeout_count + 1
+ return (metadata_timeout_count <= 10)
+ return True
+
# Primary nic detection will be optimized in the future. The fact that
# primary nic is being attached first helps here. Otherwise each nic
# could add several seconds of delay.
try:
imds_md = self.get_imds_data_with_api_fallback(
ifname,
- 5,
- metadata_type.network
+ 0,
+ metadata_type.network,
+ network_metadata_exc_cb,
+ True
)
except Exception as e:
LOG.warning(
@@ -1273,7 +1363,9 @@ class DataSourceAzure(sources.DataSource):
@return: The success status of sending the ready signal.
"""
try:
- get_metadata_from_fabric(None, lease['unknown-245'])
+ get_metadata_from_fabric(fallback_lease_file=None,
+ dhcp_opts=lease['unknown-245'],
+ iso_dev=self.iso_dev)
return True
except Exception as e:
report_diagnostic_event(
@@ -1354,35 +1446,24 @@ class DataSourceAzure(sources.DataSource):
On failure, returns False.
"""
- if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN:
- self.bounce_network_with_azure_hostname()
+ self.bounce_network_with_azure_hostname()
- pubkey_info = None
- try:
- raise KeyError(
- "Not using public SSH keys from IMDS"
- )
- # pylint:disable=unreachable
- public_keys = self.metadata['imds']['compute']['publicKeys']
- LOG.debug(
- 'Successfully retrieved %s key(s) from IMDS',
- len(public_keys)
- if public_keys is not None
- else 0
- )
- except KeyError:
- LOG.debug(
- 'Unable to retrieve SSH keys from IMDS during '
- 'negotiation, falling back to OVF'
- )
- pubkey_info = self.cfg.get('_pubkeys', None)
+ pubkey_info = None
+ ssh_keys_and_source = self._get_public_ssh_keys_and_source()
- metadata_func = partial(get_metadata_from_fabric,
- fallback_lease_file=self.
- dhclient_lease_file,
- pubkey_info=pubkey_info)
- else:
- metadata_func = self.get_metadata_from_agent
+ if not ssh_keys_and_source.keys_from_imds:
+ pubkey_info = self.cfg.get('_pubkeys', None)
+ log_msg = 'Retrieved {} fingerprints from OVF'.format(
+ len(pubkey_info)
+ if pubkey_info is not None
+ else 0
+ )
+ report_diagnostic_event(log_msg, logger_func=LOG.debug)
+
+ metadata_func = partial(get_metadata_from_fabric,
+ fallback_lease_file=self.
+ dhclient_lease_file,
+ pubkey_info=pubkey_info)
LOG.debug("negotiating with fabric via agent command %s",
self.ds_cfg['agent_command'])
@@ -1438,6 +1519,51 @@ class DataSourceAzure(sources.DataSource):
return self.metadata.get('imds', {}).get('compute', {}).get('location')
+def _username_from_imds(imds_data):
+ try:
+ return imds_data['compute']['osProfile']['adminUsername']
+ except KeyError:
+ return None
+
+
+def _userdata_from_imds(imds_data):
+ try:
+ return imds_data['compute']['userData']
+ except KeyError:
+ return None
+
+
+def _hostname_from_imds(imds_data):
+ try:
+ return imds_data['compute']['osProfile']['computerName']
+ except KeyError:
+ return None
+
+
+def _disable_password_from_imds(imds_data):
+ try:
+ return imds_data['compute']['osProfile']['disablePasswordAuthentication'] == 'true' # noqa: E501
+ except KeyError:
+ return None
+
+
+def _key_is_openssh_formatted(key):
+ """
+ Validate whether or not the key is OpenSSH-formatted.
+ """
+ # See https://bugs.launchpad.net/cloud-init/+bug/1910835
+ if '\r\n' in key.strip():
+ return False
+
+ parser = ssh_util.AuthKeyLineParser()
+ try:
+ akl = parser.parse(key)
+ except TypeError:
+ return False
+
+ return akl.keytype is not None
+
+
def _partitions_on_device(devpath, maxnum=16):
# return a list of tuples (ptnum, path) for each part on devpath
for suff in ("-part", "p", ""):
@@ -1618,33 +1744,6 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
@azure_ds_telemetry_reporter
-def crtfile_to_pubkey(fname, data=None):
- pipeline = ('openssl x509 -noout -pubkey < "$0" |'
- 'ssh-keygen -i -m PKCS8 -f /dev/stdin')
- (out, _err) = subp.subp(['sh', '-c', pipeline, fname],
- capture=True, data=data)
- return out.rstrip()
-
-
-@azure_ds_telemetry_reporter
-def pubkeys_from_crt_files(flist):
- pubkeys = []
- errors = []
- for fname in flist:
- try:
- pubkeys.append(crtfile_to_pubkey(fname))
- except subp.ProcessExecutionError:
- errors.append(fname)
-
- if errors:
- report_diagnostic_event(
- "failed to convert the crt files to pubkey: %s" % errors,
- logger_func=LOG.warning)
-
- return pubkeys
-
-
-@azure_ds_telemetry_reporter
def write_files(datadir, files, dirmode=None):
def _redact_password(cnt, fname):
@@ -1672,16 +1771,6 @@ def write_files(datadir, files, dirmode=None):
util.write_file(filename=fname, content=content, mode=0o600)
-@azure_ds_telemetry_reporter
-def invoke_agent(cmd):
- # this is a function itself to simplify patching it for test
- if cmd:
- LOG.debug("invoking agent: %s", cmd)
- subp.subp(cmd, shell=(not isinstance(cmd, list)))
- else:
- LOG.debug("not invoking agent")
-
-
def find_child(node, filter_func):
ret = []
if not node.hasChildNodes():
@@ -2117,7 +2206,9 @@ def _generate_network_config_from_fallback_config() -> dict:
def get_metadata_from_imds(fallback_nic,
retries,
md_type=metadata_type.compute,
- api_version=IMDS_VER_MIN):
+ api_version=IMDS_VER_MIN,
+ exc_cb=retry_on_url_exc,
+ infinite=False):
"""Query Azure's instance metadata service, returning a dictionary.
If network is not up, setup ephemeral dhcp on fallback_nic to talk to the
@@ -2136,7 +2227,7 @@ def get_metadata_from_imds(fallback_nic,
kwargs = {'logfunc': LOG.debug,
'msg': 'Crawl of Azure Instance Metadata Service (IMDS)',
'func': _get_metadata_from_imds,
- 'args': (retries, md_type, api_version,)}
+ 'args': (retries, exc_cb, md_type, api_version, infinite)}
if net.is_up(fallback_nic):
return util.log_time(**kwargs)
else:
@@ -2154,14 +2245,16 @@ def get_metadata_from_imds(fallback_nic,
@azure_ds_telemetry_reporter
def _get_metadata_from_imds(
retries,
+ exc_cb,
md_type=metadata_type.compute,
- api_version=IMDS_VER_MIN):
+ api_version=IMDS_VER_MIN,
+ infinite=False):
url = "{}?api-version={}".format(md_type.value, api_version)
headers = {"Metadata": "true"}
try:
response = readurl(
url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers,
- retries=retries, exception_cb=retry_on_url_exc)
+ retries=retries, exception_cb=exc_cb, infinite=infinite)
except Exception as e:
# pylint:disable=no-member
if isinstance(e, UrlError) and e.code == 400:
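
Among the Azure changes, the NIC-attach retry policy is the subtlest: request timeouts are taken as evidence the NIC may not be primary (the platform drops IMDS traffic on non-primary NICs), so they are bounded, while other failures such as 410 or 503 mean IMDS itself is unhealthy and are retried indefinitely. A stripped-down version of that callback, with the exponential log throttling elided:

import requests

def make_network_metadata_exc_cb(max_timeouts=10):
    # Bounded retries on timeouts; unbounded on everything else.
    timeout_count = 0

    def exc_cb(msg, exc):
        nonlocal timeout_count
        cause = getattr(exc, 'cause', None)
        if cause is not None and isinstance(cause, requests.Timeout):
            timeout_count += 1
            return timeout_count <= max_timeouts
        return True

    return exc_cb
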
diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py
new file mode 100644
index 00000000..c08ff848
--- /dev/null
+++ b/cloudinit/sources/DataSourceVultr.py
@@ -0,0 +1,147 @@
+# Author: Eric Benner <ebenner@vultr.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# Vultr Metadata API:
+# https://www.vultr.com/metadata/
+
+from cloudinit import log
+from cloudinit import sources
+from cloudinit import util
+
+import cloudinit.sources.helpers.vultr as vultr
+
+LOG = log.getLogger(__name__)
+BUILTIN_DS_CONFIG = {
+ 'url': 'http://169.254.169.254',
+ 'retries': 30,
+ 'timeout': 2,
+ 'wait': 2
+}
+
+
+class DataSourceVultr(sources.DataSource):
+
+ dsname = 'Vultr'
+
+ def __init__(self, sys_cfg, distro, paths):
+ super(DataSourceVultr, self).__init__(sys_cfg, distro, paths)
+ self.ds_cfg = util.mergemanydict([
+ util.get_cfg_by_path(sys_cfg, ["datasource", "Vultr"], {}),
+ BUILTIN_DS_CONFIG])
+
+    # Check whether this is Vultr and, if so, fetch and process the data
+ def _get_data(self):
+ LOG.debug("Detecting if machine is a Vultr instance")
+ if not vultr.is_vultr():
+ LOG.debug("Machine is not a Vultr instance")
+ return False
+
+ LOG.debug("Machine is a Vultr instance")
+
+ # Fetch metadata
+ md = self.get_metadata()
+
+ self.metadata_full = md
+ self.metadata['instanceid'] = md['instanceid']
+ self.metadata['local-hostname'] = md['hostname']
+ self.metadata['public-keys'] = md["public-keys"]
+ self.userdata_raw = md["user-data"]
+
+ # Generate config and process data
+ self.get_datasource_data(md)
+
+ # Dump some data so diagnosing failures is manageable
+ LOG.debug("Vultr Vendor Config:")
+ LOG.debug(md['vendor-data']['config'])
+ LOG.debug("SUBID: %s", self.metadata['instanceid'])
+ LOG.debug("Hostname: %s", self.metadata['local-hostname'])
+ if self.userdata_raw is not None:
+ LOG.debug("User-Data:")
+ LOG.debug(self.userdata_raw)
+
+ return True
+
+ # Process metadata
+ def get_datasource_data(self, md):
+ # Grab config
+ config = md['vendor-data']['config']
+
+ # Generate network config
+ self.netcfg = vultr.generate_network_config(md['interfaces'])
+
+ # This requires info generated in the vendor config
+ user_scripts = vultr.generate_user_scripts(md, self.netcfg['config'])
+
+ # Default hostname is "guest" for whitelabel
+ if self.metadata['local-hostname'] == "":
+ self.metadata['local-hostname'] = "guest"
+
+ self.userdata_raw = md["user-data"]
+ if self.userdata_raw == "":
+ self.userdata_raw = None
+
+ # Assemble vendor-data
+ # This adds provided scripts and the config
+ self.vendordata_raw = []
+ self.vendordata_raw.extend(user_scripts)
+ self.vendordata_raw.append("#cloud-config\n%s" % config)
+
+    # Fetch the metadata from the service
+ def get_metadata(self):
+ return vultr.get_metadata(self.ds_cfg['url'],
+ self.ds_cfg['timeout'],
+ self.ds_cfg['retries'],
+ self.ds_cfg['wait'])
+
+ # Compare subid as instance id
+ def check_instance_id(self, sys_cfg):
+ if not vultr.is_vultr():
+ return False
+
+        # Baremetal has no way to implement this during the local stage
+ if vultr.is_baremetal():
+ return False
+
+ subid = vultr.get_sysinfo()['subid']
+ return sources.instance_id_matches_system_uuid(subid)
+
+ # Currently unsupported
+ @property
+ def launch_index(self):
+ return None
+
+ @property
+ def network_config(self):
+ return self.netcfg
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceVultr, (sources.DEP_FILESYSTEM, )),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
+
+
+if __name__ == "__main__":
+ import sys
+
+ if not vultr.is_vultr():
+ print("Machine is not a Vultr instance")
+ sys.exit(1)
+
+ md = vultr.get_metadata(BUILTIN_DS_CONFIG['url'],
+ BUILTIN_DS_CONFIG['timeout'],
+ BUILTIN_DS_CONFIG['retries'],
+ BUILTIN_DS_CONFIG['wait'])
+ config = md['vendor-data']['config']
+ sysinfo = vultr.get_sysinfo()
+
+ print(util.json_dumps(sysinfo))
+ print(config)
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 1ad1880d..7d74f8d9 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -24,6 +24,7 @@ from cloudinit import util
from cloudinit.atomic_helper import write_json
from cloudinit.event import EventType
from cloudinit.filters import launch_index
+from cloudinit.persistence import CloudInitPickleMixin
from cloudinit.reporting import events
DSMODE_DISABLED = "disabled"
@@ -134,7 +135,7 @@ URLParams = namedtuple(
'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])
-class DataSource(metaclass=abc.ABCMeta):
+class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
dsmode = DSMODE_NETWORK
default_locale = 'en_US.UTF-8'
@@ -196,6 +197,8 @@ class DataSource(metaclass=abc.ABCMeta):
# non-root users
sensitive_metadata_keys = ('merged_cfg', 'security-credentials',)
+ _ci_pkl_version = 1
+
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
self.distro = distro
@@ -218,6 +221,13 @@ class DataSource(metaclass=abc.ABCMeta):
else:
self.ud_proc = ud_proc
+ def _unpickle(self, ci_pkl_version: int) -> None:
+ """Perform deserialization fixes for Paths."""
+ if not hasattr(self, 'vendordata2'):
+ self.vendordata2 = None
+ if not hasattr(self, 'vendordata2_raw'):
+ self.vendordata2_raw = None
+
def __str__(self):
return type_utils.obj_name(self)
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index d3055d08..ad476076 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -303,6 +303,7 @@ def http_with_retries(url, **kwargs) -> str:
max_readurl_attempts = 240
default_readurl_timeout = 5
+ sleep_duration_between_retries = 5
periodic_logging_attempts = 12
if 'timeout' not in kwargs:
@@ -338,6 +339,7 @@ def http_with_retries(url, **kwargs) -> str:
'attempt %d with exception: %s' %
(url, attempt, e),
logger_func=LOG.debug)
+ time.sleep(sleep_duration_between_retries)
raise exc
@@ -863,7 +865,19 @@ class WALinuxAgentShim:
return endpoint_ip_address
@azure_ds_telemetry_reporter
- def register_with_azure_and_fetch_data(self, pubkey_info=None) -> dict:
+ def eject_iso(self, iso_dev) -> None:
+ try:
+ LOG.debug("Ejecting the provisioning iso")
+ subp.subp(['eject', iso_dev])
+ except Exception as e:
+ report_diagnostic_event(
+ "Failed ejecting the provisioning iso: %s" % e,
+ logger_func=LOG.debug)
+
+ @azure_ds_telemetry_reporter
+ def register_with_azure_and_fetch_data(self,
+ pubkey_info=None,
+ iso_dev=None) -> dict:
"""Gets the VM's GoalState from Azure, uses the GoalState information
to report ready/send the ready signal/provisioning complete signal to
Azure, and then uses pubkey_info to filter and obtain the user's
@@ -889,6 +903,10 @@ class WALinuxAgentShim:
ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info)
health_reporter = GoalStateHealthReporter(
goal_state, self.azure_endpoint_client, self.endpoint)
+
+ if iso_dev is not None:
+ self.eject_iso(iso_dev)
+
health_reporter.send_ready_signal()
return {'public-keys': ssh_keys}
@@ -1044,11 +1062,12 @@ class WALinuxAgentShim:
@azure_ds_telemetry_reporter
def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
- pubkey_info=None):
+ pubkey_info=None, iso_dev=None):
shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
dhcp_options=dhcp_opts)
try:
- return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info)
+ return shim.register_with_azure_and_fetch_data(
+ pubkey_info=pubkey_info, iso_dev=iso_dev)
finally:
shim.clean_up()
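
The added sleep turns http_with_retries from a tight loop into a paced one; stripped to its retry skeleton (do_request is a stand-in for the readurl call), the control flow is:

import time

def http_with_retries(do_request, max_attempts=240, sleep_seconds=5):
    # Pace the attempts and re-raise the last failure when exhausted.
    exc = None
    for _attempt in range(max_attempts):
        try:
            return do_request()
        except Exception as e:
            exc = e
            time.sleep(sleep_seconds)
    raise exc
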
diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py
new file mode 100644
index 00000000..c22cd0b1
--- /dev/null
+++ b/cloudinit/sources/helpers/vultr.py
@@ -0,0 +1,242 @@
+# Author: Eric Benner <ebenner@vultr.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import log
+from cloudinit import url_helper
+from cloudinit import dmi
+from cloudinit import util
+from cloudinit import net
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+from functools import lru_cache
+
+# Get LOG
+LOG = log.getLogger(__name__)
+
+
+@lru_cache()
+def get_metadata(url, timeout, retries, sec_between):
+ # Bring up interface
+ try:
+ with EphemeralDHCPv4(connectivity_url=url):
+ # Fetch the metadata
+ v1 = read_metadata(url, timeout, retries, sec_between)
+ except (NoDHCPLeaseError) as exc:
+ LOG.error("Bailing, DHCP Exception: %s", exc)
+ raise
+
+    return json.loads(v1)
+
+
+# Read the system information from SMBIOS
+def get_sysinfo():
+ return {
+ 'manufacturer': dmi.read_dmi_data("system-manufacturer"),
+ 'subid': dmi.read_dmi_data("system-serial-number")
+ }
+
+
+# Assumes we have already confirmed this is a Vultr instance
+def is_baremetal():
+ if get_sysinfo()['manufacturer'] != "Vultr":
+ return True
+ return False
+
+
+# Confirm this is a Vultr instance
+def is_vultr():
+ # VC2, VDC, and HFC use DMI
+ sysinfo = get_sysinfo()
+
+ if sysinfo['manufacturer'] == "Vultr":
+ return True
+
+ # Baremetal requires a kernel parameter
+ if "vultr" in util.get_cmdline().split():
+ return True
+
+ return False
+
+
+# Read Metadata endpoint
+def read_metadata(url, timeout, retries, sec_between):
+ url = "%s/v1.json" % url
+ response = url_helper.readurl(url,
+ timeout=timeout,
+ retries=retries,
+ headers={'Metadata-Token': 'vultr'},
+ sec_between=sec_between)
+
+ if not response.ok():
+        raise RuntimeError("Failed to connect to %s: Code: %s" %
+                           (url, response.code))
+
+ return response.contents.decode()
+
+
+# Wrapped for caching
+@lru_cache()
+def get_interface_map():
+ return net.get_interfaces_by_mac()
+
+
+# Convert macs to nics
+def get_interface_name(mac):
+ macs_to_nic = get_interface_map()
+
+ if mac not in macs_to_nic:
+ return None
+
+ return macs_to_nic.get(mac)
+
+
+# Generate network configs
+def generate_network_config(interfaces):
+ network = {
+ "version": 1,
+ "config": [
+ {
+ "type": "nameserver",
+ "address": [
+ "108.61.10.10"
+ ]
+ }
+ ]
+ }
+
+ # Prepare interface 0, public
+ if len(interfaces) > 0:
+ public = generate_public_network_interface(interfaces[0])
+ network['config'].append(public)
+
+ # Prepare interface 1, private
+ if len(interfaces) > 1:
+ private = generate_private_network_interface(interfaces[1])
+ network['config'].append(private)
+
+ return network
+
+
+# Generate the public interface's network config from metadata
+def generate_public_network_interface(interface):
+ interface_name = get_interface_name(interface['mac'])
+ if not interface_name:
+ raise RuntimeError(
+ "Interface: %s could not be found on the system" %
+ interface['mac'])
+
+ netcfg = {
+ "name": interface_name,
+ "type": "physical",
+ "mac_address": interface['mac'],
+ "accept-ra": 1,
+ "subnets": [
+ {
+ "type": "dhcp",
+ "control": "auto"
+ },
+ {
+ "type": "dhcp6",
+ "control": "auto"
+ },
+ ]
+ }
+
+    # Check for additional IPv4 addresses
+    if "ipv4" in interface and len(interface['ipv4']['additional']) > 0:
+ for additional in interface['ipv4']['additional']:
+ add = {
+ "type": "static",
+ "control": "auto",
+ "address": additional['address'],
+ "netmask": additional['netmask']
+ }
+ netcfg['subnets'].append(add)
+
+    # Check for additional IPv6 addresses, with the same key guard
+    if "ipv6" in interface and len(interface['ipv6']['additional']) > 0:
+ for additional in interface['ipv6']['additional']:
+ add = {
+ "type": "static6",
+ "control": "auto",
+ "address": additional['address'],
+ "netmask": additional['netmask']
+ }
+ netcfg['subnets'].append(add)
+
+    return netcfg
+
+
+# Take interface metadata and generate the private network config part
+def generate_private_network_interface(interface):
+ interface_name = get_interface_name(interface['mac'])
+ if not interface_name:
+ raise RuntimeError(
+ "Interface: %s could not be found on the system" %
+ interface['mac'])
+
+ netcfg = {
+ "name": interface_name,
+ "type": "physical",
+ "mac_address": interface['mac'],
+ "accept-ra": 1,
+ "subnets": [
+ {
+ "type": "static",
+ "control": "auto",
+ "address": interface['ipv4']['address'],
+ "netmask": interface['ipv4']['netmask']
+ }
+ ]
+ }
+
+ return netcfg
+
+
+# This is for the vendor and startup scripts
+def generate_user_scripts(md, network_config):
+ user_scripts = []
+
+ # Raid 1 script
+ if md['vendor-data']['raid1-script']:
+ user_scripts.append(md['vendor-data']['raid1-script'])
+
+    # Enable multi-queue on Linux
+ if util.is_Linux() and md['vendor-data']['ethtool-script']:
+ ethtool_script = md['vendor-data']['ethtool-script']
+
+ # Tool location
+ tool = "/opt/vultr/ethtool"
+
+ # Go through the interfaces
+ for netcfg in network_config:
+ # If the interface has a mac and is physical
+ if "mac_address" in netcfg and netcfg['type'] == "physical":
+            # Set its multi-queue count to the number of cores, per RHEL docs
+ name = netcfg['name']
+ command = "%s -L %s combined $(nproc --all)" % (tool, name)
+ ethtool_script = '%s\n%s' % (ethtool_script, command)
+
+ user_scripts.append(ethtool_script)
+
+ # This is for vendor scripts
+ if md['vendor-data']['vendor-script']:
+ user_scripts.append(md['vendor-data']['vendor-script'])
+
+ # Startup script
+ script = md['startup-script']
+ if script and script != "echo No configured startup script":
+ user_scripts.append(script)
+
+ return user_scripts
+
+
+# vi: ts=4 expandtab
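
For orientation, the version-1 network config assembled by the new helper has
the shape below. This is a self-contained sketch with invented mac and address
values, not the module's own output; the real interface name would come from
get_interface_name():

    import json

    sample_private = {
        "mac": "56:00:00:00:00:01",  # hypothetical sample value
        "ipv4": {"address": "10.1.0.2", "netmask": "255.255.0.0"},
    }

    config = {
        "version": 1,
        "config": [
            {"type": "nameserver", "address": ["108.61.10.10"]},
            {
                "name": "ens8",  # placeholder; resolved from the mac at runtime
                "type": "physical",
                "mac_address": sample_private["mac"],
                "accept-ra": 1,
                "subnets": [{
                    "type": "static",
                    "control": "auto",
                    "address": sample_private["ipv4"]["address"],
                    "netmask": sample_private["ipv4"]["netmask"],
                }],
            },
        ],
    }

    print(json.dumps(config, indent=2))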
diff --git a/cloudinit/tests/test_upgrade.py b/cloudinit/tests/test_upgrade.py
index f79a2536..da3ab23b 100644
--- a/cloudinit/tests/test_upgrade.py
+++ b/cloudinit/tests/test_upgrade.py
@@ -43,3 +43,10 @@ class TestUpgrade:
def test_blacklist_drivers_set_on_networking(self, previous_obj_pkl):
"""We always expect Networking.blacklist_drivers to be initialised."""
assert previous_obj_pkl.distro.networking.blacklist_drivers is None
+
+ def test_paths_has_run_dir_attribute(self, previous_obj_pkl):
+ assert previous_obj_pkl.paths.run_dir is not None
+
+ def test_vendordata_exists(self, previous_obj_pkl):
+ assert previous_obj_pkl.vendordata2 is None
+ assert previous_obj_pkl.vendordata2_raw is None
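
These assertions exercise the _unpickle upgrade hook. Condensed from the
cpick-d132356c patch dropped later in this upload, the pattern is roughly:

    from cloudinit.persistence import CloudInitPickleMixin


    class DataSource(CloudInitPickleMixin):
        _ci_pkl_version = 1

        def _unpickle(self, ci_pkl_version: int) -> None:
            # State pickled by older cloud-init lacks these attributes;
            # default them so newer code can rely on their presence.
            if not hasattr(self, 'vendordata2'):
                self.vendordata2 = None
            if not hasattr(self, 'vendordata2_raw'):
                self.vendordata2_raw = None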
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index e811917e..a4c02877 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -107,12 +107,31 @@ OS_RELEASE_REDHAT_7 = dedent("""\
REDHAT_SUPPORT_PRODUCT_VERSION="7.5"
""")
+OS_RELEASE_ALMALINUX_8 = dedent("""\
+ NAME="AlmaLinux"
+ VERSION="8.3 (Purple Manul)"
+ ID="almalinux"
+ ID_LIKE="rhel centos fedora"
+ VERSION_ID="8.3"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="AlmaLinux 8.3 (Purple Manul)"
+ ANSI_COLOR="0;34"
+ CPE_NAME="cpe:/o:almalinux:almalinux:8.3:GA"
+ HOME_URL="https://almalinux.org/"
+ BUG_REPORT_URL="https://bugs.almalinux.org/"
+
+ ALMALINUX_MANTISBT_PROJECT="AlmaLinux-8"
+ ALMALINUX_MANTISBT_PROJECT_VERSION="8.3"
+""")
+
REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)"
REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)"
REDHAT_RELEASE_REDHAT_6 = (
"Red Hat Enterprise Linux Server release 6.10 (Santiago)")
REDHAT_RELEASE_REDHAT_7 = (
"Red Hat Enterprise Linux Server release 7.5 (Maipo)")
+REDHAT_RELEASE_ALMALINUX_8 = (
+ "AlmaLinux release 8.3 (Purple Manul)")
OS_RELEASE_DEBIAN = dedent("""\
@@ -503,6 +522,22 @@ class TestGetLinuxDistro(CiTestCase):
self.assertEqual(('centos', '7', 'Core'), dist)
@mock.patch('cloudinit.util.load_file')
+ def test_get_linux_almalinux8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify almalinux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_ALMALINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_almalinux8_osrelease(self, m_os_release, m_path_exists):
+ """Verify almalinux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_ALMALINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
def test_get_linux_debian(self, m_os_release, m_path_exists):
"""Verify we get the correct name and release name on Debian."""
m_os_release.return_value = OS_RELEASE_DEBIAN
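
As a rough illustration of what these fixtures exercise: an os-release
document reduces to the expected (distro, version, codename) tuple along
these lines. This is a simplified stand-in, not util.get_linux_distro itself:

    def parse_os_release(contents):
        info = {}
        for line in contents.splitlines():
            key, sep, value = line.partition("=")
            if sep:
                info[key] = value.strip().strip('"')
        # VERSION is formatted like: 8.3 (Purple Manul)
        version, _, codename = info.get("VERSION", "").partition(" (")
        return (info.get("ID", ""), version.strip(), codename.rstrip(")"))

    sample = 'NAME="AlmaLinux"\nVERSION="8.3 (Purple Manul)"\nID="almalinux"'
    print(parse_os_release(sample))  # ('almalinux', '8.3', 'Purple Manul')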
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 4e0a72db..fdea1181 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -521,8 +521,8 @@ def system_info():
if system == "linux":
linux_dist = info['dist'][0].lower()
if linux_dist in (
- 'alpine', 'arch', 'centos', 'debian', 'fedora', 'rhel',
- 'suse'):
+ 'almalinux', 'alpine', 'arch', 'centos', 'debian', 'fedora',
+ 'rhel', 'suse'):
var = linux_dist
elif linux_dist in ('ubuntu', 'linuxmint', 'mint'):
var = 'ubuntu'
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 94afd60d..be47aff3 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "21.1"
+__VERSION__ = "21.2"
_PACKAGED_VERSION = '@@PACKAGED_VERSION@@'
FEATURES = [
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index 7171aaa5..8656daa7 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -21,7 +21,7 @@ disable_root: false
disable_root: true
{% endif %}
-{% if variant in ["alpine", "amazon", "centos", "fedora", "rhel"] %}
+{% if variant in ["almalinux", "alpine", "amazon", "centos", "fedora", "rhel"] %}
mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
{% if variant == "amazon" %}
resize_rootfs: noblock
@@ -153,7 +153,7 @@ cloud_final_modules:
# (not accessible to handlers/transforms)
system_info:
# This will affect which distro class gets used
-{% if variant in ["alpine", "amazon", "arch", "centos", "debian",
+{% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "debian",
"fedora", "freebsd", "netbsd", "openbsd", "rhel",
"suse", "ubuntu"] %}
distro: {{ variant }}
@@ -206,7 +206,7 @@ system_info:
primary: http://ports.ubuntu.com/ubuntu-ports
security: http://ports.ubuntu.com/ubuntu-ports
ssh_svcname: ssh
-{% elif variant in ["alpine", "amazon", "arch", "centos", "fedora",
+{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "fedora",
"rhel", "suse"] %}
# Default user name + that default users groups (if added/used)
default_user:
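
The branches above are plain Jinja conditionals, so a variant's effect can be
sanity-checked in isolation. A sketch assuming the jinja2 package is
available:

    from jinja2 import Template

    tmpl = Template(
        "{% if variant in ['almalinux', 'alpine', 'amazon', 'centos',"
        " 'fedora', 'rhel'] %}"
        "mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']"
        "{% endif %}")
    print(tmpl.render(variant="almalinux"))  # emits the mount defaults line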
diff --git a/debian/changelog b/debian/changelog
index 008420de..4dd0fed8 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,53 @@
+cloud-init (21.2-3-g899bfaa9-0ubuntu1) impish; urgency=medium
+
+ * drop the following cherry-picks now included:
+ + cpick-83f6bbfb-Fix-unpickle-for-source-paths-missing-run_dir-863
+ + cpick-d132356c-fix-error-on-upgrade-caused-by-new-vendordata2
+ * New upstream snapshot.
+ - Update test characters in substitution unit test (#893)
+ - cc_disk_setup.py: remove UDEVADM_CMD definition as not used (#886)
+ [dermotbradley]
+ - Add AlmaLinux OS support (#872) [Andrew Lukoshko]
+ - Release 21.2 (#890) (LP: #1927254)
+ - Add \r\n check for SSH keys in Azure (#889)
+ - Revert "Add support to resize rootfs if using LVM (#721)" (#887)
+ (LP: #1922742)
+ - Add Vultaire as contributor (#881) [Paul Goins]
+ - Azure: adding support for consuming userdata from IMDS (#884) [Anh Vo]
+ - test_upgrade: modify test_upgrade_package to run for more sources (#883)
+ - Fix chef module run failure when chef_license is set (#868) [Ben Hughes]
+ - Azure: Retry net metadata during nic attach for non-timeout errs (#878)
+ [aswinrajamannar]
+ - Azure: Retrieve username and hostname from IMDS (#865) [Thomas Stringer]
+ - Azure: eject the provisioning iso before reporting ready (#861) [Anh Vo]
+ - Use `partprobe` to re-read partition table if available (#856)
+ [Nicolas Bock] (LP: #1920939)
+ - fix error on upgrade caused by new vendordata2 attributes (#869)
+ (LP: #1922739)
+ - add prefer_fqdn_over_hostname config option (#859)
+ [hamalq] (LP: #1921004)
+ - Emit dots on travis to avoid timeout (#867)
+ - doc: Replace remaining references to user-scripts as a config module
+ (#866) [Ryan Harper]
+ - azure: Removing ability to invoke walinuxagent (#799) [Anh Vo]
+ - Add Vultr support (#827) [David Dymko]
+ - Fix unpickle for source paths missing run_dir (#863)
+ [lucasmoura] (LP: #1899299)
+ - sysconfig: use BONDING_MODULE_OPTS on SUSE (#831) [Jens Sandmann]
+ - bringup_static_routes: fix gateway check (#850) [Petr Fedchenkov]
+ - add hamalq user (#860) [hamalq]
+ - Add support to resize rootfs if using LVM (#721)
+ [Eduardo Otubo] (LP: #1799953)
+ - Fix mis-detecting network configuration in initramfs cmdline (#844)
+ (LP: #1919188)
+ - tools/write-ssh-key-fingerprints: do not display empty header/footer
+ (#817) [dermotbradley]
+ - Azure helper: Ensure Azure http handler sleeps between retries (#842)
+ [Johnson Shi]
+ - Fix chef apt source example (#826) [timothegenzmer]
+
+ -- James Falcon <james.falcon@canonical.com> Tue, 11 May 2021 11:52:54 -0500
+
cloud-init (21.1-19-gbad84ad4-0ubuntu3) hirsute; urgency=medium
* cherry-pick 83f6bbfb: Fix unpickle for source paths missing run_dir
diff --git a/debian/cloud-init.templates b/debian/cloud-init.templates
index 2ff1bceb..13f6df8d 100644
--- a/debian/cloud-init.templates
+++ b/debian/cloud-init.templates
@@ -1,8 +1,8 @@
Template: cloud-init/datasources
Type: multiselect
-Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, None
-Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, None
-__Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com, SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Scaleway: Scaleway metadata service, AliYun: Alibaba metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, Hetzner: Hetzner Cloud, IBMCloud: IBM Cloud. Previously softlayer or bluemix., Oracle: Oracle Compute Infrastructure, Exoscale: Exoscale, RbxCloud: HyperOne and Rootbox platforms, UpCloud: UpCloud, None: Failsafe datasource
+Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, Vultr, None
+Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, Vultr, None
+__Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com, SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Scaleway: Scaleway metadata service, AliYun: Alibaba metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, Hetzner: Hetzner Cloud, IBMCloud: IBM Cloud. Previously softlayer or bluemix., Oracle: Oracle Compute Infrastructure, Exoscale: Exoscale, RbxCloud: HyperOne and Rootbox platforms, UpCloud: UpCloud, Vultr: Vultr Cloud, None: Failsafe datasource
_Description: Which data sources should be searched?
Cloud-init supports searching different "Data Sources" for information
that it uses to configure a cloud instance.
diff --git a/debian/patches/cpick-83f6bbfb-Fix-unpickle-for-source-paths-missing-run_dir-863 b/debian/patches/cpick-83f6bbfb-Fix-unpickle-for-source-paths-missing-run_dir-863
deleted file mode 100644
index d80de236..00000000
--- a/debian/patches/cpick-83f6bbfb-Fix-unpickle-for-source-paths-missing-run_dir-863
+++ /dev/null
@@ -1,576 +0,0 @@
-From 83f6bbfbe5b924be61a3c098f4202377d69c8947 Mon Sep 17 00:00:00 2001
-From: lucasmoura <lucas.moura@canonical.com>
-Date: Mon, 12 Apr 2021 13:22:22 -0300
-Subject: [PATCH] Fix unpickle for source paths missing run_dir (#863)
-
-On the datasource class, we require the use of paths.run_dir to
-perform some operations. On older cloud-init version, the
-Paths class does not have the run_dir attribute. To fix that,
-we are now manually adding that attribute in the Paths
-object if doesn't exist in the unpickle operation.
-
-LP: #1899299
----
- cloudinit/helpers.py | 17 +-
- cloudinit/tests/test_upgrade.py | 3 +
- .../data/old_pickles/trusty-14.04.1-0.7.5.pkl | 504 ++++++++++++++++++
- 3 files changed, 523 insertions(+), 1 deletion(-)
- create mode 100644 tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl
-
---- a/cloudinit/helpers.py
-+++ b/cloudinit/helpers.py
-@@ -20,6 +20,7 @@ from cloudinit.settings import (PER_INST
-
- from cloudinit import log as logging
- from cloudinit import type_utils
-+from cloudinit import persistence
- from cloudinit import util
-
- LOG = logging.getLogger(__name__)
-@@ -317,7 +318,9 @@ class ContentHandlers(object):
- return list(self.registered.items())
-
-
--class Paths(object):
-+class Paths(persistence.CloudInitPickleMixin):
-+ _ci_pkl_version = 1
-+
- def __init__(self, path_cfgs, ds=None):
- self.cfgs = path_cfgs
- # Populate all the initial paths
-@@ -354,6 +357,18 @@ class Paths(object):
- # Set when a datasource becomes active
- self.datasource = ds
-
-+ def _unpickle(self, ci_pkl_version: int) -> None:
-+ """Perform deserialization fixes for Paths."""
-+ if not hasattr(self, "run_dir"):
-+ # On older versions of cloud-init the Paths class do not
-+ # have the run_dir attribute. This is problematic because
-+ # when loading the pickle object on newer versions of cloud-init
-+ # we will rely on this attribute. To fix that, we are now
-+ # manually adding that attribute here.
-+ self.run_dir = Paths(
-+ path_cfgs=self.cfgs,
-+ ds=self.datasource).run_dir
-+
- # get_ipath_cur: get the current instance path for an item
- def get_ipath_cur(self, name=None):
- return self._get_path(self.instance_link, name)
---- a/cloudinit/tests/test_upgrade.py
-+++ b/cloudinit/tests/test_upgrade.py
-@@ -43,3 +43,6 @@ class TestUpgrade:
- def test_blacklist_drivers_set_on_networking(self, previous_obj_pkl):
- """We always expect Networking.blacklist_drivers to be initialised."""
- assert previous_obj_pkl.distro.networking.blacklist_drivers is None
-+
-+ def test_paths_has_run_dir_attribute(self, previous_obj_pkl):
-+ assert previous_obj_pkl.paths.run_dir is not None
---- /dev/null
-+++ b/tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl
-@@ -0,0 +1,504 @@
-+ccopy_reg
-+_reconstructor
-+p1
-+(ccloudinit.sources.DataSourceNoCloud
-+DataSourceNoCloudNet
-+p2
-+c__builtin__
-+object
-+p3
-+NtRp4
-+(dp5
-+S'paths'
-+p6
-+g1
-+(ccloudinit.helpers
-+Paths
-+p7
-+g3
-+NtRp8
-+(dp9
-+S'lookups'
-+p10
-+(dp11
-+S'cloud_config'
-+p12
-+S'cloud-config.txt'
-+p13
-+sS'userdata'
-+p14
-+S'user-data.txt.i'
-+p15
-+sS'vendordata'
-+p16
-+S'vendor-data.txt.i'
-+p17
-+sS'userdata_raw'
-+p18
-+S'user-data.txt'
-+p19
-+sS'boothooks'
-+p20
-+g20
-+sS'scripts'
-+p21
-+g21
-+sS'sem'
-+p22
-+g22
-+sS'data'
-+p23
-+g23
-+sS'vendor_scripts'
-+p24
-+S'scripts/vendor'
-+p25
-+sS'handlers'
-+p26
-+g26
-+sS'obj_pkl'
-+p27
-+S'obj.pkl'
-+p28
-+sS'vendordata_raw'
-+p29
-+S'vendor-data.txt'
-+p30
-+sS'vendor_cloud_config'
-+p31
-+S'vendor-cloud-config.txt'
-+p32
-+ssS'template_tpl'
-+p33
-+S'/etc/cloud/templates/%s.tmpl'
-+p34
-+sS'cfgs'
-+p35
-+(dp36
-+S'cloud_dir'
-+p37
-+S'/var/lib/cloud/'
-+p38
-+sS'templates_dir'
-+p39
-+S'/etc/cloud/templates/'
-+p40
-+sS'upstart_dir'
-+p41
-+S'/etc/init/'
-+p42
-+ssS'cloud_dir'
-+p43
-+g38
-+sS'datasource'
-+p44
-+NsS'upstart_conf_d'
-+p45
-+g42
-+sS'boot_finished'
-+p46
-+S'/var/lib/cloud/instance/boot-finished'
-+p47
-+sS'instance_link'
-+p48
-+S'/var/lib/cloud/instance'
-+p49
-+sS'seed_dir'
-+p50
-+S'/var/lib/cloud/seed'
-+p51
-+sbsS'supported_seed_starts'
-+p52
-+(S'http://'
-+S'https://'
-+S'ftp://'
-+tp53
-+sS'sys_cfg'
-+p54
-+(dp55
-+S'output'
-+p56
-+(dp57
-+S'all'
-+p58
-+S'| tee -a /var/log/cloud-init-output.log'
-+p59
-+ssS'users'
-+p60
-+(lp61
-+S'default'
-+p62
-+asS'def_log_file'
-+p63
-+S'/var/log/cloud-init.log'
-+p64
-+sS'cloud_final_modules'
-+p65
-+(lp66
-+S'rightscale_userdata'
-+p67
-+aS'scripts-vendor'
-+p68
-+aS'scripts-per-once'
-+p69
-+aS'scripts-per-boot'
-+p70
-+aS'scripts-per-instance'
-+p71
-+aS'scripts-user'
-+p72
-+aS'ssh-authkey-fingerprints'
-+p73
-+aS'keys-to-console'
-+p74
-+aS'phone-home'
-+p75
-+aS'final-message'
-+p76
-+aS'power-state-change'
-+p77
-+asS'disable_root'
-+p78
-+I01
-+sS'syslog_fix_perms'
-+p79
-+S'syslog:adm'
-+p80
-+sS'log_cfgs'
-+p81
-+(lp82
-+(lp83
-+S'[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n'
-+p84
-+aS'[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=("/dev/log", handlers.SysLogHandler.LOG_USER)\n'
-+p85
-+aa(lp86
-+g84
-+aS"[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n"
-+p87
-+aasS'cloud_init_modules'
-+p88
-+(lp89
-+S'migrator'
-+p90
-+aS'seed_random'
-+p91
-+aS'bootcmd'
-+p92
-+aS'write-files'
-+p93
-+aS'growpart'
-+p94
-+aS'resizefs'
-+p95
-+aS'set_hostname'
-+p96
-+aS'update_hostname'
-+p97
-+aS'update_etc_hosts'
-+p98
-+aS'ca-certs'
-+p99
-+aS'rsyslog'
-+p100
-+aS'users-groups'
-+p101
-+aS'ssh'
-+p102
-+asS'preserve_hostname'
-+p103
-+I00
-+sS'_log'
-+p104
-+(lp105
-+g84
-+ag87
-+ag85
-+asS'datasource_list'
-+p106
-+(lp107
-+S'NoCloud'
-+p108
-+aS'ConfigDrive'
-+p109
-+aS'OpenNebula'
-+p110
-+aS'Azure'
-+p111
-+aS'AltCloud'
-+p112
-+aS'OVF'
-+p113
-+aS'MAAS'
-+p114
-+aS'GCE'
-+p115
-+aS'OpenStack'
-+p116
-+aS'CloudSigma'
-+p117
-+aS'Ec2'
-+p118
-+aS'CloudStack'
-+p119
-+aS'SmartOS'
-+p120
-+aS'None'
-+p121
-+asS'vendor_data'
-+p122
-+(dp123
-+S'prefix'
-+p124
-+(lp125
-+sS'enabled'
-+p126
-+I01
-+ssS'cloud_config_modules'
-+p127
-+(lp128
-+S'emit_upstart'
-+p129
-+aS'disk_setup'
-+p130
-+aS'mounts'
-+p131
-+aS'ssh-import-id'
-+p132
-+aS'locale'
-+p133
-+aS'set-passwords'
-+p134
-+aS'grub-dpkg'
-+p135
-+aS'apt-pipelining'
-+p136
-+aS'apt-configure'
-+p137
-+aS'package-update-upgrade-install'
-+p138
-+aS'landscape'
-+p139
-+aS'timezone'
-+p140
-+aS'puppet'
-+p141
-+aS'chef'
-+p142
-+aS'salt-minion'
-+p143
-+aS'mcollective'
-+p144
-+aS'disable-ec2-metadata'
-+p145
-+aS'runcmd'
-+p146
-+aS'byobu'
-+p147
-+assg14
-+Nsg16
-+Nsg18
-+S'#cloud-config\n{}\n\n'
-+p148
-+sg29
-+S'#cloud-config\n{}\n\n'
-+p149
-+sS'dsmode'
-+p150
-+S'net'
-+p151
-+sS'seed'
-+p152
-+S'/var/lib/cloud/seed/nocloud-net'
-+p153
-+sS'cmdline_id'
-+p154
-+S'ds=nocloud-net'
-+p155
-+sS'ud_proc'
-+p156
-+g1
-+(ccloudinit.user_data
-+UserDataProcessor
-+p157
-+g3
-+NtRp158
-+(dp159
-+g6
-+g8
-+sS'ssl_details'
-+p160
-+(dp161
-+sbsg50
-+g153
-+sS'ds_cfg'
-+p162
-+(dp163
-+sS'distro'
-+p164
-+g1
-+(ccloudinit.distros.ubuntu
-+Distro
-+p165
-+g3
-+NtRp166
-+(dp167
-+S'osfamily'
-+p168
-+S'debian'
-+p169
-+sS'_paths'
-+p170
-+g8
-+sS'name'
-+p171
-+S'ubuntu'
-+p172
-+sS'_runner'
-+p173
-+g1
-+(ccloudinit.helpers
-+Runners
-+p174
-+g3
-+NtRp175
-+(dp176
-+g6
-+g8
-+sS'sems'
-+p177
-+(dp178
-+sbsS'_cfg'
-+p179
-+(dp180
-+S'paths'
-+p181
-+(dp182
-+g37
-+g38
-+sg39
-+g40
-+sg41
-+g42
-+ssS'default_user'
-+p183
-+(dp184
-+S'shell'
-+p185
-+S'/bin/bash'
-+p186
-+sS'name'
-+p187
-+S'ubuntu'
-+p188
-+sS'sudo'
-+p189
-+(lp190
-+S'ALL=(ALL) NOPASSWD:ALL'
-+p191
-+asS'lock_passwd'
-+p192
-+I01
-+sS'gecos'
-+p193
-+S'Ubuntu'
-+p194
-+sS'groups'
-+p195
-+(lp196
-+S'adm'
-+p197
-+aS'audio'
-+p198
-+aS'cdrom'
-+p199
-+aS'dialout'
-+p200
-+aS'dip'
-+p201
-+aS'floppy'
-+p202
-+aS'netdev'
-+p203
-+aS'plugdev'
-+p204
-+aS'sudo'
-+p205
-+aS'video'
-+p206
-+assS'package_mirrors'
-+p207
-+(lp208
-+(dp209
-+S'arches'
-+p210
-+(lp211
-+S'i386'
-+p212
-+aS'amd64'
-+p213
-+asS'failsafe'
-+p214
-+(dp215
-+S'security'
-+p216
-+S'http://security.ubuntu.com/ubuntu'
-+p217
-+sS'primary'
-+p218
-+S'http://archive.ubuntu.com/ubuntu'
-+p219
-+ssS'search'
-+p220
-+(dp221
-+S'security'
-+p222
-+(lp223
-+sS'primary'
-+p224
-+(lp225
-+S'http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/'
-+p226
-+aS'http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/'
-+p227
-+aS'http://%(region)s.clouds.archive.ubuntu.com/ubuntu/'
-+p228
-+assa(dp229
-+S'arches'
-+p230
-+(lp231
-+S'armhf'
-+p232
-+aS'armel'
-+p233
-+aS'default'
-+p234
-+asS'failsafe'
-+p235
-+(dp236
-+S'security'
-+p237
-+S'http://ports.ubuntu.com/ubuntu-ports'
-+p238
-+sS'primary'
-+p239
-+S'http://ports.ubuntu.com/ubuntu-ports'
-+p240
-+ssasS'ssh_svcname'
-+p241
-+S'ssh'
-+p242
-+ssbsS'metadata'
-+p243
-+(dp244
-+g150
-+g151
-+sS'local-hostname'
-+p245
-+S'trusty-upgrade2'
-+p246
-+sS'instance-id'
-+p247
-+S'trusty-upgrade2'
-+p248
-+ssb.
-\ No newline at end of file
diff --git a/debian/patches/cpick-d132356c-fix-error-on-upgrade-caused-by-new-vendordata2 b/debian/patches/cpick-d132356c-fix-error-on-upgrade-caused-by-new-vendordata2
deleted file mode 100644
index 7bd48fe7..00000000
--- a/debian/patches/cpick-d132356c-fix-error-on-upgrade-caused-by-new-vendordata2
+++ /dev/null
@@ -1,147 +0,0 @@
-From d132356cc361abef2d90d4073438f3ab759d5964 Mon Sep 17 00:00:00 2001
-From: James Falcon <TheRealFalcon@users.noreply.github.com>
-Date: Mon, 19 Apr 2021 11:31:28 -0500
-Subject: [PATCH] fix error on upgrade caused by new vendordata2 attributes
- (#869)
-
-In #777, we added 'vendordata2' and 'vendordata2_raw' attributes to
-the DataSource class, but didn't use the upgrade framework to deal
-with an unpickle after upgrade. This commit adds the necessary
-upgrade code.
-
-Additionally, added a smaller-scope upgrade test to our integration
-tests that will be run on every CI run so we catch these issues
-immediately in the future.
-
-LP: #1922739
----
- cloudinit/sources/__init__.py | 12 +++++++++++-
- cloudinit/tests/test_upgrade.py | 4 ++++
- tests/integration_tests/clouds.py | 4 ++--
- tests/integration_tests/test_upgrade.py | 25 ++++++++++++++++++++++++-
- 4 files changed, 41 insertions(+), 4 deletions(-)
-
---- a/cloudinit/sources/__init__.py
-+++ b/cloudinit/sources/__init__.py
-@@ -24,6 +24,7 @@ from cloudinit import util
- from cloudinit.atomic_helper import write_json
- from cloudinit.event import EventType
- from cloudinit.filters import launch_index
-+from cloudinit.persistence import CloudInitPickleMixin
- from cloudinit.reporting import events
-
- DSMODE_DISABLED = "disabled"
-@@ -134,7 +135,7 @@ URLParams = namedtuple(
- 'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])
-
-
--class DataSource(metaclass=abc.ABCMeta):
-+class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
-
- dsmode = DSMODE_NETWORK
- default_locale = 'en_US.UTF-8'
-@@ -196,6 +197,8 @@ class DataSource(metaclass=abc.ABCMeta):
- # non-root users
- sensitive_metadata_keys = ('merged_cfg', 'security-credentials',)
-
-+ _ci_pkl_version = 1
-+
- def __init__(self, sys_cfg, distro, paths, ud_proc=None):
- self.sys_cfg = sys_cfg
- self.distro = distro
-@@ -218,6 +221,13 @@ class DataSource(metaclass=abc.ABCMeta):
- else:
- self.ud_proc = ud_proc
-
-+ def _unpickle(self, ci_pkl_version: int) -> None:
-+ """Perform deserialization fixes for Paths."""
-+ if not hasattr(self, 'vendordata2'):
-+ self.vendordata2 = None
-+ if not hasattr(self, 'vendordata2_raw'):
-+ self.vendordata2_raw = None
-+
- def __str__(self):
- return type_utils.obj_name(self)
-
---- a/cloudinit/tests/test_upgrade.py
-+++ b/cloudinit/tests/test_upgrade.py
-@@ -46,3 +46,7 @@ class TestUpgrade:
-
- def test_paths_has_run_dir_attribute(self, previous_obj_pkl):
- assert previous_obj_pkl.paths.run_dir is not None
-+
-+ def test_vendordata_exists(self, previous_obj_pkl):
-+ assert previous_obj_pkl.vendordata2 is None
-+ assert previous_obj_pkl.vendordata2_raw is None
---- a/tests/integration_tests/clouds.py
-+++ b/tests/integration_tests/clouds.py
-@@ -110,14 +110,14 @@ class IntegrationCloud(ABC):
- # Even if we're using the default key, it may still have a
- # different name in the clouds, so we need to set it separately.
- self.cloud_instance.key_pair.name = settings.KEYPAIR_NAME
-- self._released_image_id = self._get_initial_image()
-+ self.released_image_id = self._get_initial_image()
- self.snapshot_id = None
-
- @property
- def image_id(self):
- if self.snapshot_id:
- return self.snapshot_id
-- return self._released_image_id
-+ return self.released_image_id
-
- def emit_settings_to_log(self) -> None:
- log.info(
---- a/tests/integration_tests/test_upgrade.py
-+++ b/tests/integration_tests/test_upgrade.py
-@@ -1,4 +1,5 @@
- import logging
-+import os
- import pytest
- import time
- from pathlib import Path
-@@ -8,6 +9,8 @@ from tests.integration_tests.conftest im
- get_validated_source,
- session_start_time,
- )
-+from tests.integration_tests.instances import CloudInitSource
-+
-
- log = logging.getLogger('integration_testing')
-
-@@ -63,7 +66,7 @@ def test_upgrade(session_cloud: Integrat
- return # type checking doesn't understand that skip raises
-
- launch_kwargs = {
-- 'image_id': session_cloud._get_initial_image(),
-+ 'image_id': session_cloud.released_image_id,
- }
-
- image = ImageSpecification.from_os_image()
-@@ -93,6 +96,26 @@ def test_upgrade(session_cloud: Integrat
- instance.install_new_cloud_init(source, take_snapshot=False)
- instance.execute('hostname something-else')
- _restart(instance)
-+ assert instance.execute('cloud-init status --wait --long').ok
- _output_to_compare(instance, after_path, netcfg_path)
-
- log.info('Wrote upgrade test logs to %s and %s', before_path, after_path)
-+
-+
-+@pytest.mark.ci
-+@pytest.mark.ubuntu
-+def test_upgrade_package(session_cloud: IntegrationCloud):
-+ if get_validated_source(session_cloud) != CloudInitSource.DEB_PACKAGE:
-+ not_run_message = 'Test only supports upgrading to build deb'
-+ if os.environ.get('TRAVIS'):
-+ # If this isn't running on CI, we should know
-+ pytest.fail(not_run_message)
-+ else:
-+ pytest.skip(not_run_message)
-+
-+ launch_kwargs = {'image_id': session_cloud.released_image_id}
-+
-+ with session_cloud.launch(launch_kwargs=launch_kwargs) as instance:
-+ instance.install_deb()
-+ instance.restart()
-+ assert instance.execute('cloud-init status --wait --long').ok
diff --git a/debian/patches/series b/debian/patches/series
deleted file mode 100644
index 70ce620b..00000000
--- a/debian/patches/series
+++ /dev/null
@@ -1,2 +0,0 @@
-cpick-83f6bbfb-Fix-unpickle-for-source-paths-missing-run_dir-863
-cpick-d132356c-fix-error-on-upgrade-caused-by-new-vendordata2
diff --git a/debian/po/templates.pot b/debian/po/templates.pot
index e88025ef..ac7b10a6 100644
--- a/debian/po/templates.pot
+++ b/debian/po/templates.pot
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: cloud-init\n"
"Report-Msgid-Bugs-To: cloud-init@packages.debian.org\n"
-"POT-Creation-Date: 2021-03-22 14:08-0500\n"
+"POT-Creation-Date: 2021-04-20 10:14-0400\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -158,6 +158,12 @@ msgstr ""
#. Type: multiselect
#. Choices
#: ../cloud-init.templates:1001
+msgid "Vultr: Vultr"
+msgstr ""
+
+#. Type: multiselect
+#. Choices
+#: ../cloud-init.templates:1001
msgid "None: Failsafe datasource"
msgstr ""
diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt
index 8cebfd80..414111a1 100644
--- a/doc/examples/cloud-config-chef.txt
+++ b/doc/examples/cloud-config-chef.txt
@@ -15,41 +15,41 @@ apt:
sources:
source1:
source: "deb http://packages.chef.io/repos/apt/stable $RELEASE main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: GnuPG v1.4.12 (Darwin)
- Comment: GPGTools - http://gpgtools.org
-
- mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
- twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
- dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
- JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
- ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
- XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
- DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
- sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
- Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
- YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
- CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
- +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg
- PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK
- CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid
- AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd
- Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz
- SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK
- OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/
- Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY
- IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu
- twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8
- DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE
- WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS
- 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA
- dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC
- MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD
- 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K
- zA==
- =IxPr
- -----END PGP PUBLIC KEY BLOCK-----
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: GnuPG v1.4.12 (Darwin)
+ Comment: GPGTools - http://gpgtools.org
+
+ mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
+ twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
+ dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
+ JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
+ ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
+ XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
+ DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
+ sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
+ Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
+ YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
+ CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
+ +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg
+ PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK
+ CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid
+ AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd
+ Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz
+ SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK
+ OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/
+ Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY
+ IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu
+ twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8
+ DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE
+ WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS
+ 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA
+ dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC
+ MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD
+ 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K
+ zA==
+ =IxPr
+ -----END PGP PUBLIC KEY BLOCK-----
chef:
diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst
index f58b2b38..f3e13edc 100644
--- a/doc/rtd/topics/availability.rst
+++ b/doc/rtd/topics/availability.rst
@@ -56,6 +56,7 @@ environments in the public cloud:
- AltCloud
- SmartOS
- UpCloud
+- Vultr
Additionally, cloud-init is supported on these private clouds:
diff --git a/doc/rtd/topics/boot.rst b/doc/rtd/topics/boot.rst
index a5282e35..f7dfcd3a 100644
--- a/doc/rtd/topics/boot.rst
+++ b/doc/rtd/topics/boot.rst
@@ -150,7 +150,7 @@ Things that run here include
* package installations
* configuration management plugins (puppet, chef, salt-minion)
- * user-scripts (i.e. shell scripts passed as user-data)
+ * user-defined scripts (i.e. shell scripts passed as user-data)
For scripts external to cloud-init looking to wait until cloud-init is
finished, the ``cloud-init status`` subcommand can help block external
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 228173d2..497b1467 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -49,7 +49,7 @@ The following is a list of documents for each supported datasource:
datasources/smartos.rst
datasources/upcloud.rst
datasources/zstack.rst
-
+ datasources/vultr.rst
Creation
========
diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst
index e04c3a33..ad9f2236 100644
--- a/doc/rtd/topics/datasources/azure.rst
+++ b/doc/rtd/topics/datasources/azure.rst
@@ -5,28 +5,6 @@ Azure
This datasource finds metadata and user-data from the Azure cloud platform.
-walinuxagent
-------------
-walinuxagent has several functions within images. For cloud-init
-specifically, the relevant functionality it performs is to register the
-instance with the Azure cloud platform at boot so networking will be
-permitted. For more information about the other functionality of
-walinuxagent, see `Azure's documentation
-<https://github.com/Azure/WALinuxAgent#introduction>`_ for more details.
-(Note, however, that only one of walinuxagent's provisioning and cloud-init
-should be used to perform instance customisation.)
-
-If you are configuring walinuxagent yourself, you will want to ensure that you
-have `Provisioning.UseCloudInit
-<https://github.com/Azure/WALinuxAgent#provisioningusecloudinit>`_ set to
-``y``.
-
-
-Builtin Agent
--------------
-An alternative to using walinuxagent to register to the Azure cloud platform
-is to use the ``__builtin__`` agent command. This section contains more
-background on what that code path does, and how to enable it.
The Azure cloud platform provides initial data to an instance via an attached
CD formatted in UDF. That CD contains a 'ovf-env.xml' file that provides some
@@ -41,16 +19,6 @@ by calling a script in /etc/dhcp/dhclient-exit-hooks or a file in
'dhclient_hook' of cloud-init itself. This sub-command will write the client
information in json format to /run/cloud-init/dhclient.hook/<interface>.json.
-In order for cloud-init to leverage this method to find the endpoint, the
-cloud.cfg file must contain:
-
-.. sourcecode:: yaml
-
- datasource:
- Azure:
- set_hostname: False
- agent_command: __builtin__
-
If those files are not available, the fallback is to check the leases file
for the endpoint server (again option 245).
@@ -83,9 +51,6 @@ configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``).
The settings that may be configured are:
- * **agent_command**: Either __builtin__ (default) or a command to run to getcw
- metadata. If __builtin__, get metadata from walinuxagent. Otherwise run the
- provided command to obtain metadata.
* **apply_network_config**: Boolean set to True to use network configuration
described by Azure's IMDS endpoint instead of fallback network config of
dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is
@@ -121,7 +86,6 @@ An example configuration with the default values is provided below:
datasource:
Azure:
- agent_command: __builtin__
apply_network_config: true
data_dir: /var/lib/waagent
dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases
@@ -144,9 +108,7 @@ child of the ``LinuxProvisioningConfigurationSet`` (a sibling to ``UserName``)
If both ``UserData`` and ``CustomData`` are provided behavior is undefined on
which will be selected.
-In the example below, user-data provided is 'this is my userdata', and the
-datasource config provided is ``{"agent_command": ["start", "walinuxagent"]}``.
-That agent command will take affect as if it were specified in system config.
+In the example below, the user-data provided is 'this is my userdata'.
Example:
@@ -184,20 +146,16 @@ The hostname is provided to the instance in the ovf-env.xml file as
Whatever value the instance provides in its dhcp request will resolve in the
domain returned in the 'search' request.
-The interesting issue is that a generic image will already have a hostname
-configured. The ubuntu cloud images have 'ubuntu' as the hostname of the
-system, and the initial dhcp request on eth0 is not guaranteed to occur after
-the datasource code has been run. So, on first boot, that initial value will
-be sent in the dhcp request and *that* value will resolve.
-
-In order to make the ``HostName`` provided in the ovf-env.xml resolve, a
-dhcp request must be made with the new value. Walinuxagent (in its current
-version) handles this by polling the state of hostname and bouncing ('``ifdown
-eth0; ifup eth0``' the network interface if it sees that a change has been
-made.
+A generic image will already have a hostname configured. The ubuntu
+cloud images have 'ubuntu' as the hostname of the system, and the
+initial dhcp request on eth0 is not guaranteed to occur after the
+datasource code has been run. So, on first boot, that initial value
+will be sent in the dhcp request and *that* value will resolve.
-cloud-init handles this by setting the hostname in the DataSource's 'get_data'
-method via '``hostname $HostName``', and then bouncing the interface. This
+In order to make the ``HostName`` provided in the ovf-env.xml resolve,
+a dhcp request must be made with the new value. cloud-init handles
+this by setting the hostname in the DataSource's 'get_data' method via
+'``hostname $HostName``', and then bouncing the interface. This
behavior can be configured or disabled in the datasource config. See
'Configuration' above.
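
The bounce sequence these docs describe amounts to the following. A rough
sketch only; cloud-init's actual code path routes the commands through its
distro and subp layers rather than calling them directly:

    import subprocess

    def publish_hostname(hostname, iface="eth0"):
        subprocess.check_call(["hostname", hostname])
        # Bounce the interface so the next dhcp request carries the new
        # hostname and that value resolves in the returned search domain.
        subprocess.check_call(["ifdown", iface])
        subprocess.check_call(["ifup", iface])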
diff --git a/doc/rtd/topics/datasources/vultr.rst b/doc/rtd/topics/datasources/vultr.rst
new file mode 100644
index 00000000..e73406a8
--- /dev/null
+++ b/doc/rtd/topics/datasources/vultr.rst
@@ -0,0 +1,35 @@
+.. _datasource_vultr:
+
+Vultr
+=====
+
+The `Vultr`_ datasource retrieves basic configuration values from the locally
+accessible `metadata service`_. All data is served over HTTP from the address
+169.254.169.254. The endpoints are documented at
+`https://www.vultr.com/metadata/ <https://www.vultr.com/metadata/>`_.
+
+Configuration
+-------------
+
+Vultr's datasource can be configured as follows:
+
+.. sourcecode:: yaml
+
+  datasource:
+    Vultr:
+      url: 'http://169.254.169.254'
+      retries: 3
+      timeout: 2
+      wait: 2
+
+- *url*: The URL from which to acquire the metadata configuration
+- *retries*: Determines the number of times to attempt to connect to the
+ metadata service
+- *timeout*: Determines the timeout in seconds to wait for a response from the
+ metadata service
+- *wait*: Determines the number of seconds to wait before retrying a
+  failed request
+
+.. _Vultr: https://www.vultr.com/
+.. _metadata service: https://www.vultr.com/metadata/
+
+.. vi: textwidth=78
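
For a quick manual check of the service this datasource consumes, the
documented endpoint can be queried directly. A sketch assuming the requests
package; the Metadata-Token header matches the one the new helper sends:

    import requests

    resp = requests.get(
        "http://169.254.169.254/v1.json",
        headers={"Metadata-Token": "vultr"},
        timeout=2,
    )
    resp.raise_for_status()
    print(sorted(resp.json().keys()))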
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
index 07cad765..5f7a74f8 100644
--- a/doc/rtd/topics/network-config.rst
+++ b/doc/rtd/topics/network-config.rst
@@ -148,6 +148,10 @@ The following Datasources optionally provide network configuration:
- `UpCloud JSON metadata`_
+- :ref:`datasource_vultr`
+
+ - `Vultr JSON metadata`_
+
For more information on network configuration formats
.. toctree::
@@ -262,5 +266,6 @@ Example output converting V2 to sysconfig:
.. _OpenStack Metadata Service Network: https://specs.openstack.org/openstack/nova-specs/specs/liberty/implemented/metadata-service-network-info.html
.. _SmartOS JSON Metadata: https://eng.joyent.com/mdata/datadict.html
.. _UpCloud JSON metadata: https://developers.upcloud.com/1.3/8-servers/#metadata-service
+.. _Vultr JSON metadata: https://www.vultr.com/metadata/
.. vi: textwidth=78
diff --git a/doc/rtd/topics/vendordata.rst b/doc/rtd/topics/vendordata.rst
index cdb552d0..87a899b3 100644
--- a/doc/rtd/topics/vendordata.rst
+++ b/doc/rtd/topics/vendordata.rst
@@ -47,8 +47,8 @@ way as user-data.
The only differences are:
- * user-scripts are stored in a different location than user-scripts (to
- avoid namespace collision)
+ * vendor-data-defined scripts are stored in a different location than
+ user-data-defined scripts (to avoid namespace collision)
* user can disable part handlers by cloud-config settings.
For example, to disable handling of 'part-handlers' in vendor-data,
the user could provide user-data like this:
diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl
index 0773356b..9b103ef9 100755
--- a/systemd/cloud-init-generator.tmpl
+++ b/systemd/cloud-init-generator.tmpl
@@ -83,7 +83,7 @@ default() {
check_for_datasource() {
local ds_rc=""
-{% if variant in ["rhel", "fedora", "centos"] %}
+{% if variant in ["almalinux", "rhel", "fedora", "centos"] %}
local dsidentify="/usr/libexec/cloud-init/ds-identify"
{% else %}
local dsidentify="/usr/lib/cloud-init/ds-identify"
diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl
index f140344d..a5c51277 100644
--- a/systemd/cloud-init.service.tmpl
+++ b/systemd/cloud-init.service.tmpl
@@ -10,7 +10,7 @@ After=systemd-networkd-wait-online.service
{% if variant in ["ubuntu", "unknown", "debian"] %}
After=networking.service
{% endif %}
-{% if variant in ["centos", "fedora", "rhel"] %}
+{% if variant in ["almalinux", "centos", "fedora", "rhel"] %}
After=network.service
After=NetworkManager.service
{% endif %}
diff --git a/templates/chef_client.rb.tmpl b/templates/chef_client.rb.tmpl
index 0a759b04..b9d58172 100644
--- a/templates/chef_client.rb.tmpl
+++ b/templates/chef_client.rb.tmpl
@@ -15,7 +15,7 @@ The reason these are not in quotes is because they are ruby
symbols that will be placed inside here, and not actual strings...
#}
{% if chef_license %}
-chef_license "{{chef_license}}"
+chef_license "{{chef_license}}"
{% endif%}
{% if log_level %}
log_level {{log_level}}
diff --git a/tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl b/tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl
new file mode 100644
index 00000000..c7d7844b
--- /dev/null
+++ b/tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl
@@ -0,0 +1,504 @@
+ccopy_reg
+_reconstructor
+p1
+(ccloudinit.sources.DataSourceNoCloud
+DataSourceNoCloudNet
+p2
+c__builtin__
+object
+p3
+NtRp4
+(dp5
+S'paths'
+p6
+g1
+(ccloudinit.helpers
+Paths
+p7
+g3
+NtRp8
+(dp9
+S'lookups'
+p10
+(dp11
+S'cloud_config'
+p12
+S'cloud-config.txt'
+p13
+sS'userdata'
+p14
+S'user-data.txt.i'
+p15
+sS'vendordata'
+p16
+S'vendor-data.txt.i'
+p17
+sS'userdata_raw'
+p18
+S'user-data.txt'
+p19
+sS'boothooks'
+p20
+g20
+sS'scripts'
+p21
+g21
+sS'sem'
+p22
+g22
+sS'data'
+p23
+g23
+sS'vendor_scripts'
+p24
+S'scripts/vendor'
+p25
+sS'handlers'
+p26
+g26
+sS'obj_pkl'
+p27
+S'obj.pkl'
+p28
+sS'vendordata_raw'
+p29
+S'vendor-data.txt'
+p30
+sS'vendor_cloud_config'
+p31
+S'vendor-cloud-config.txt'
+p32
+ssS'template_tpl'
+p33
+S'/etc/cloud/templates/%s.tmpl'
+p34
+sS'cfgs'
+p35
+(dp36
+S'cloud_dir'
+p37
+S'/var/lib/cloud/'
+p38
+sS'templates_dir'
+p39
+S'/etc/cloud/templates/'
+p40
+sS'upstart_dir'
+p41
+S'/etc/init/'
+p42
+ssS'cloud_dir'
+p43
+g38
+sS'datasource'
+p44
+NsS'upstart_conf_d'
+p45
+g42
+sS'boot_finished'
+p46
+S'/var/lib/cloud/instance/boot-finished'
+p47
+sS'instance_link'
+p48
+S'/var/lib/cloud/instance'
+p49
+sS'seed_dir'
+p50
+S'/var/lib/cloud/seed'
+p51
+sbsS'supported_seed_starts'
+p52
+(S'http://'
+S'https://'
+S'ftp://'
+tp53
+sS'sys_cfg'
+p54
+(dp55
+S'output'
+p56
+(dp57
+S'all'
+p58
+S'| tee -a /var/log/cloud-init-output.log'
+p59
+ssS'users'
+p60
+(lp61
+S'default'
+p62
+asS'def_log_file'
+p63
+S'/var/log/cloud-init.log'
+p64
+sS'cloud_final_modules'
+p65
+(lp66
+S'rightscale_userdata'
+p67
+aS'scripts-vendor'
+p68
+aS'scripts-per-once'
+p69
+aS'scripts-per-boot'
+p70
+aS'scripts-per-instance'
+p71
+aS'scripts-user'
+p72
+aS'ssh-authkey-fingerprints'
+p73
+aS'keys-to-console'
+p74
+aS'phone-home'
+p75
+aS'final-message'
+p76
+aS'power-state-change'
+p77
+asS'disable_root'
+p78
+I01
+sS'syslog_fix_perms'
+p79
+S'syslog:adm'
+p80
+sS'log_cfgs'
+p81
+(lp82
+(lp83
+S'[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n'
+p84
+aS'[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=("/dev/log", handlers.SysLogHandler.LOG_USER)\n'
+p85
+aa(lp86
+g84
+aS"[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n"
+p87
+aasS'cloud_init_modules'
+p88
+(lp89
+S'migrator'
+p90
+aS'seed_random'
+p91
+aS'bootcmd'
+p92
+aS'write-files'
+p93
+aS'growpart'
+p94
+aS'resizefs'
+p95
+aS'set_hostname'
+p96
+aS'update_hostname'
+p97
+aS'update_etc_hosts'
+p98
+aS'ca-certs'
+p99
+aS'rsyslog'
+p100
+aS'users-groups'
+p101
+aS'ssh'
+p102
+asS'preserve_hostname'
+p103
+I00
+sS'_log'
+p104
+(lp105
+g84
+ag87
+ag85
+asS'datasource_list'
+p106
+(lp107
+S'NoCloud'
+p108
+aS'ConfigDrive'
+p109
+aS'OpenNebula'
+p110
+aS'Azure'
+p111
+aS'AltCloud'
+p112
+aS'OVF'
+p113
+aS'MAAS'
+p114
+aS'GCE'
+p115
+aS'OpenStack'
+p116
+aS'CloudSigma'
+p117
+aS'Ec2'
+p118
+aS'CloudStack'
+p119
+aS'SmartOS'
+p120
+aS'None'
+p121
+asS'vendor_data'
+p122
+(dp123
+S'prefix'
+p124
+(lp125
+sS'enabled'
+p126
+I01
+ssS'cloud_config_modules'
+p127
+(lp128
+S'emit_upstart'
+p129
+aS'disk_setup'
+p130
+aS'mounts'
+p131
+aS'ssh-import-id'
+p132
+aS'locale'
+p133
+aS'set-passwords'
+p134
+aS'grub-dpkg'
+p135
+aS'apt-pipelining'
+p136
+aS'apt-configure'
+p137
+aS'package-update-upgrade-install'
+p138
+aS'landscape'
+p139
+aS'timezone'
+p140
+aS'puppet'
+p141
+aS'chef'
+p142
+aS'salt-minion'
+p143
+aS'mcollective'
+p144
+aS'disable-ec2-metadata'
+p145
+aS'runcmd'
+p146
+aS'byobu'
+p147
+assg14
+Nsg16
+Nsg18
+S'#cloud-config\n{}\n\n'
+p148
+sg29
+S'#cloud-config\n{}\n\n'
+p149
+sS'dsmode'
+p150
+S'net'
+p151
+sS'seed'
+p152
+S'/var/lib/cloud/seed/nocloud-net'
+p153
+sS'cmdline_id'
+p154
+S'ds=nocloud-net'
+p155
+sS'ud_proc'
+p156
+g1
+(ccloudinit.user_data
+UserDataProcessor
+p157
+g3
+NtRp158
+(dp159
+g6
+g8
+sS'ssl_details'
+p160
+(dp161
+sbsg50
+g153
+sS'ds_cfg'
+p162
+(dp163
+sS'distro'
+p164
+g1
+(ccloudinit.distros.ubuntu
+Distro
+p165
+g3
+NtRp166
+(dp167
+S'osfamily'
+p168
+S'debian'
+p169
+sS'_paths'
+p170
+g8
+sS'name'
+p171
+S'ubuntu'
+p172
+sS'_runner'
+p173
+g1
+(ccloudinit.helpers
+Runners
+p174
+g3
+NtRp175
+(dp176
+g6
+g8
+sS'sems'
+p177
+(dp178
+sbsS'_cfg'
+p179
+(dp180
+S'paths'
+p181
+(dp182
+g37
+g38
+sg39
+g40
+sg41
+g42
+ssS'default_user'
+p183
+(dp184
+S'shell'
+p185
+S'/bin/bash'
+p186
+sS'name'
+p187
+S'ubuntu'
+p188
+sS'sudo'
+p189
+(lp190
+S'ALL=(ALL) NOPASSWD:ALL'
+p191
+asS'lock_passwd'
+p192
+I01
+sS'gecos'
+p193
+S'Ubuntu'
+p194
+sS'groups'
+p195
+(lp196
+S'adm'
+p197
+aS'audio'
+p198
+aS'cdrom'
+p199
+aS'dialout'
+p200
+aS'dip'
+p201
+aS'floppy'
+p202
+aS'netdev'
+p203
+aS'plugdev'
+p204
+aS'sudo'
+p205
+aS'video'
+p206
+assS'package_mirrors'
+p207
+(lp208
+(dp209
+S'arches'
+p210
+(lp211
+S'i386'
+p212
+aS'amd64'
+p213
+asS'failsafe'
+p214
+(dp215
+S'security'
+p216
+S'http://security.ubuntu.com/ubuntu'
+p217
+sS'primary'
+p218
+S'http://archive.ubuntu.com/ubuntu'
+p219
+ssS'search'
+p220
+(dp221
+S'security'
+p222
+(lp223
+sS'primary'
+p224
+(lp225
+S'http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/'
+p226
+aS'http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/'
+p227
+aS'http://%(region)s.clouds.archive.ubuntu.com/ubuntu/'
+p228
+assa(dp229
+S'arches'
+p230
+(lp231
+S'armhf'
+p232
+aS'armel'
+p233
+aS'default'
+p234
+asS'failsafe'
+p235
+(dp236
+S'security'
+p237
+S'http://ports.ubuntu.com/ubuntu-ports'
+p238
+sS'primary'
+p239
+S'http://ports.ubuntu.com/ubuntu-ports'
+p240
+ssasS'ssh_svcname'
+p241
+S'ssh'
+p242
+ssbsS'metadata'
+p243
+(dp244
+g150
+g151
+sS'local-hostname'
+p245
+S'trusty-upgrade2'
+p246
+sS'instance-id'
+p247
+S'trusty-upgrade2'
+p248
+ssb.
\ No newline at end of file
diff --git a/tests/integration_tests/bugs/test_lp1813396.py b/tests/integration_tests/bugs/test_lp1813396.py
index 7ad0e809..68b96b1d 100644
--- a/tests/integration_tests/bugs/test_lp1813396.py
+++ b/tests/integration_tests/bugs/test_lp1813396.py
@@ -6,7 +6,7 @@ Ensure gpg is called with no tty flag.
import pytest
from tests.integration_tests.instances import IntegrationInstance
-from tests.integration_tests.log_utils import verify_ordered_items_in_text
+from tests.integration_tests.util import verify_ordered_items_in_text
USER_DATA = """\
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index a6026309..3bbccb44 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -25,6 +25,7 @@ from tests.integration_tests.instances import (
IntegrationOciInstance,
IntegrationLxdInstance,
)
+from tests.integration_tests.util import emit_dots_on_travis
try:
from typing import Optional
@@ -110,14 +111,14 @@ class IntegrationCloud(ABC):
# Even if we're using the default key, it may still have a
# different name in the clouds, so we need to set it separately.
self.cloud_instance.key_pair.name = settings.KEYPAIR_NAME
- self._released_image_id = self._get_initial_image()
+ self.released_image_id = self._get_initial_image()
self.snapshot_id = None
@property
def image_id(self):
if self.snapshot_id:
return self.snapshot_id
- return self._released_image_id
+ return self.released_image_id
def emit_settings_to_log(self) -> None:
log.info(
@@ -167,7 +168,8 @@ class IntegrationCloud(ABC):
"\n".join("{}={}".format(*item) for item in kwargs.items())
)
- pycloudlib_instance = self._perform_launch(kwargs)
+ with emit_dots_on_travis():
+ pycloudlib_instance = self._perform_launch(kwargs)
log.info('Launched instance: %s', pycloudlib_instance)
instance = self.get_instance(pycloudlib_instance, settings)
if kwargs.get('wait', True):
diff --git a/tests/integration_tests/log_utils.py b/tests/integration_tests/log_utils.py
deleted file mode 100644
index 40baae7b..00000000
--- a/tests/integration_tests/log_utils.py
+++ /dev/null
@@ -1,11 +0,0 @@
-def verify_ordered_items_in_text(to_verify: list, text: str):
- """Assert all items in list appear in order in text.
-
- Examples:
- verify_ordered_items_in_text(['a', '1'], 'ab1') # passes
- verify_ordered_items_in_text(['1', 'a'], 'ab1') # raises AssertionError
- """
- index = 0
- for item in to_verify:
- index = text[index:].find(item)
- assert index > -1, "Expected item not found: '{}'".format(item)
diff --git a/tests/integration_tests/modules/test_keys_to_console.py b/tests/integration_tests/modules/test_keys_to_console.py
index 298c9e6d..56dff9a0 100644
--- a/tests/integration_tests/modules/test_keys_to_console.py
+++ b/tests/integration_tests/modules/test_keys_to_console.py
@@ -10,6 +10,11 @@ ssh_fp_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
ssh_key_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
"""
+BLACKLIST_ALL_KEYS_USER_DATA = """\
+#cloud-config
+ssh_fp_console_blacklist: [ssh-dsa, ssh-ecdsa, ssh-ed25519, ssh-rsa, ssh-dss, ecdsa-sha2-nistp256]
+""" # noqa: E501
+
DISABLED_USER_DATA = """\
#cloud-config
ssh:
@@ -31,6 +36,20 @@ class TestKeysToConsoleBlacklist:
assert "({})".format(key_type) in syslog
+@pytest.mark.user_data(BLACKLIST_ALL_KEYS_USER_DATA)
+class TestAllKeysToConsoleBlacklist:
+ """Test that when key blacklist contains all key types that
+ no header/footer are output.
+ """
+ def test_header_excluded(self, class_client):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "BEGIN SSH HOST KEY FINGERPRINTS" not in syslog
+
+ def test_footer_excluded(self, class_client):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "END SSH HOST KEY FINGERPRINTS" not in syslog
+
+
@pytest.mark.user_data(DISABLED_USER_DATA)
class TestKeysToConsoleDisabled:
"""Test that output can be fully disabled."""
diff --git a/tests/integration_tests/modules/test_power_state_change.py b/tests/integration_tests/modules/test_power_state_change.py
index eebe6608..5f3a32ac 100644
--- a/tests/integration_tests/modules/test_power_state_change.py
+++ b/tests/integration_tests/modules/test_power_state_change.py
@@ -9,7 +9,7 @@ import pytest
from tests.integration_tests.clouds import IntegrationCloud
from tests.integration_tests.instances import IntegrationInstance
-from tests.integration_tests.log_utils import verify_ordered_items_in_text
+from tests.integration_tests.util import verify_ordered_items_in_text
USER_DATA = """\
#cloud-config
diff --git a/tests/integration_tests/modules/test_set_hostname.py b/tests/integration_tests/modules/test_set_hostname.py
index 2bfa403d..e7f7f6b6 100644
--- a/tests/integration_tests/modules/test_set_hostname.py
+++ b/tests/integration_tests/modules/test_set_hostname.py
@@ -24,6 +24,13 @@ hostname: cloudinit1
fqdn: cloudinit2.i9n.cloud-init.io
"""
+USER_DATA_PREFER_FQDN = """\
+#cloud-config
+prefer_fqdn_over_hostname: {}
+hostname: cloudinit1
+fqdn: cloudinit2.test.io
+"""
+
@pytest.mark.ci
class TestHostname:
@@ -33,6 +40,16 @@ class TestHostname:
hostname_output = client.execute("hostname")
assert "cloudinit2" in hostname_output.strip()
+ @pytest.mark.user_data(USER_DATA_PREFER_FQDN.format(True))
+ def test_prefer_fqdn(self, client):
+ hostname_output = client.execute("hostname")
+ assert "cloudinit2.test.io" in hostname_output.strip()
+
+ @pytest.mark.user_data(USER_DATA_PREFER_FQDN.format(False))
+ def test_prefer_short_hostname(self, client):
+ hostname_output = client.execute("hostname")
+ assert "cloudinit1" in hostname_output.strip()
+
@pytest.mark.user_data(USER_DATA_FQDN)
def test_hostname_and_fqdn(self, client):
hostname_output = client.execute("hostname")
diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py
index c20cb3c1..7478a1b9 100644
--- a/tests/integration_tests/test_upgrade.py
+++ b/tests/integration_tests/test_upgrade.py
@@ -1,4 +1,5 @@
import logging
+import os
import pytest
import time
from pathlib import Path
@@ -9,8 +10,12 @@ from tests.integration_tests.conftest import (
session_start_time,
)
+
log = logging.getLogger('integration_testing')
+UNSUPPORTED_INSTALL_METHOD_MSG = (
+ "Install method '{}' not supported for this test"
+)
USER_DATA = """\
#cloud-config
hostname: SRU-worked
@@ -54,16 +59,14 @@ def _restart(instance):
@pytest.mark.sru_2020_11
-def test_upgrade(session_cloud: IntegrationCloud):
+def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud):
source = get_validated_source(session_cloud)
if not source.installs_new_version():
- pytest.skip("Install method '{}' not supported for this test".format(
- source
- ))
+ pytest.skip(UNSUPPORTED_INSTALL_METHOD_MSG.format(source))
return # type checking doesn't understand that skip raises
launch_kwargs = {
- 'image_id': session_cloud._get_initial_image(),
+ 'image_id': session_cloud.released_image_id,
}
image = ImageSpecification.from_os_image()
@@ -93,6 +96,29 @@ def test_upgrade(session_cloud: IntegrationCloud):
instance.install_new_cloud_init(source, take_snapshot=False)
instance.execute('hostname something-else')
_restart(instance)
+ assert instance.execute('cloud-init status --wait --long').ok
_output_to_compare(instance, after_path, netcfg_path)
log.info('Wrote upgrade test logs to %s and %s', before_path, after_path)
+
+
+@pytest.mark.ci
+@pytest.mark.ubuntu
+def test_subsequent_boot_of_upgraded_package(session_cloud: IntegrationCloud):
+ source = get_validated_source(session_cloud)
+ if not source.installs_new_version():
+ if os.environ.get('TRAVIS'):
+            # On CI the install method should always be supported, so
+            # fail loudly instead of skipping.
+ pytest.fail(UNSUPPORTED_INSTALL_METHOD_MSG.format(source))
+ else:
+ pytest.skip(UNSUPPORTED_INSTALL_METHOD_MSG.format(source))
+ return # type checking doesn't understand that skip raises
+
+ launch_kwargs = {'image_id': session_cloud.released_image_id}
+
+ with session_cloud.launch(launch_kwargs=launch_kwargs) as instance:
+ instance.install_new_cloud_init(
+ source, take_snapshot=False, clean=False
+ )
+ instance.restart()
+ assert instance.execute('cloud-init status --wait --long').ok
diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py
new file mode 100644
index 00000000..3ef12358
--- /dev/null
+++ b/tests/integration_tests/util.py
@@ -0,0 +1,49 @@
+import logging
+import multiprocessing
+import os
+import time
+from contextlib import contextmanager
+
+log = logging.getLogger('integration_testing')
+
+
+def verify_ordered_items_in_text(to_verify: list, text: str):
+ """Assert all items in list appear in order in text.
+
+ Examples:
+ verify_ordered_items_in_text(['a', '1'], 'ab1') # passes
+ verify_ordered_items_in_text(['1', 'a'], 'ab1') # raises AssertionError
+ """
+ index = 0
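+    # Search forward from each match so the items must appear in order.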
+ for item in to_verify:
+ index = text[index:].find(item)
+ assert index > -1, "Expected item not found: '{}'".format(item)
+
+
+@contextmanager
+def emit_dots_on_travis():
+ """emit a dot every 60 seconds if running on Travis.
+
+ Travis will kill jobs that don't emit output for a certain amount of time.
+ This context manager spins up a background process which will emit a dot to
+ stdout every 60 seconds to avoid being killed.
+
+ It should be wrapped selectively around operations that are known to take a
+ long time.
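+
+    A minimal usage sketch (the long-running call is hypothetical):
+
+        with emit_dots_on_travis():
+            do_expensive_thing()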
+ """
+ if os.environ.get('TRAVIS') != "true":
+ # If we aren't on Travis, don't do anything.
+ yield
+ return
+
+ def emit_dots():
+ while True:
+ log.info(".")
+ time.sleep(60)
+
+ dot_process = multiprocessing.Process(target=emit_dots)
+ dot_process.start()
+ try:
+ yield
+ finally:
+ dot_process.terminate()
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index 74f85959..f5cf514d 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -224,7 +224,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
self._call_main(['cloud-init', 'devel', 'schema', '--docs', 'all'])
expected_doc_sections = [
'**Supported distros:** all',
- '**Supported distros:** alpine, centos, debian, fedora',
+ '**Supported distros:** almalinux, alpine, centos, debian, fedora',
'**Config schema**:\n **resize_rootfs:** (true/false/noblock)',
'**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n'
]
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index dedebeb1..742d1faa 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -108,7 +108,7 @@ NETWORK_METADATA = {
"zone": "",
"publicKeys": [
{
- "keyData": "key1",
+ "keyData": "ssh-rsa key1",
"path": "path1"
}
]
@@ -448,7 +448,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
"http://169.254.169.254/metadata/instance?api-version="
"2019-06-01", exception_cb=mock.ANY,
headers=mock.ANY, retries=mock.ANY,
- timeout=mock.ANY)
+ timeout=mock.ANY, infinite=False)
@mock.patch(MOCKPATH + 'readurl', autospec=True)
@mock.patch(MOCKPATH + 'EphemeralDHCPv4')
@@ -467,7 +467,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
"http://169.254.169.254/metadata/instance/network?api-version="
"2019-06-01", exception_cb=mock.ANY,
headers=mock.ANY, retries=mock.ANY,
- timeout=mock.ANY)
+ timeout=mock.ANY, infinite=False)
@mock.patch(MOCKPATH + 'readurl', autospec=True)
@mock.patch(MOCKPATH + 'EphemeralDHCPv4')
@@ -486,7 +486,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
"http://169.254.169.254/metadata/instance?api-version="
"2019-06-01", exception_cb=mock.ANY,
headers=mock.ANY, retries=mock.ANY,
- timeout=mock.ANY)
+ timeout=mock.ANY, infinite=False)
@mock.patch(MOCKPATH + 'readurl', autospec=True)
@mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting', autospec=True)
@@ -511,7 +511,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
m_readurl.assert_called_with(
self.network_md_url, exception_cb=mock.ANY,
headers={'Metadata': 'true'}, retries=2,
- timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS)
+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, infinite=False)
@mock.patch('cloudinit.url_helper.time.sleep')
@mock.patch(MOCKPATH + 'net.is_up', autospec=True)
@@ -638,17 +638,10 @@ scbus-1 on xpt0 bus 0
def dsdevs():
return data.get('dsdevs', [])
- def _invoke_agent(cmd):
- data['agent_invoked'] = cmd
-
def _wait_for_files(flist, _maxwait=None, _naplen=None):
data['waited'] = flist
return []
- def _pubkeys_from_crt_files(flist):
- data['pubkey_files'] = flist
- return ["pubkey_from: %s" % f for f in flist]
-
if data.get('ovfcontent') is not None:
populate_dir(os.path.join(self.paths.seed_dir, "azure"),
{'ovf-env.xml': data['ovfcontent']})
@@ -675,8 +668,6 @@ scbus-1 on xpt0 bus 0
self.apply_patches([
(dsaz, 'list_possible_azure_ds_devs', dsdevs),
- (dsaz, 'invoke_agent', _invoke_agent),
- (dsaz, 'pubkeys_from_crt_files', _pubkeys_from_crt_files),
(dsaz, 'perform_hostname_bounce', mock.MagicMock()),
(dsaz, 'get_hostname', mock.MagicMock()),
(dsaz, 'set_hostname', mock.MagicMock()),
@@ -765,7 +756,6 @@ scbus-1 on xpt0 bus 0
ret = dsrc.get_data()
self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
self.assertFalse(ret)
- self.assertNotIn('agent_invoked', data)
# Assert that for non viable platforms,
# there is no communication with the Azure datasource.
self.assertEqual(
@@ -789,7 +779,6 @@ scbus-1 on xpt0 bus 0
ret = dsrc.get_data()
self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
self.assertFalse(ret)
- self.assertNotIn('agent_invoked', data)
self.assertEqual(
1,
m_report_failure.call_count)
@@ -806,7 +795,6 @@ scbus-1 on xpt0 bus 0
1,
m_crawl_metadata.call_count)
self.assertFalse(ret)
- self.assertNotIn('agent_invoked', data)
def test_crawl_metadata_exception_should_report_failure_with_msg(self):
data = {}
@@ -1086,21 +1074,6 @@ scbus-1 on xpt0 bus 0
self.assertTrue(os.path.isdir(self.waagent_d))
self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700)
- def test_user_cfg_set_agent_command_plain(self):
- # set dscfg in via plaintext
- # we must have friendly-to-xml formatted plaintext in yaml_cfg
- # not all plaintext is expected to work.
- yaml_cfg = "{agent_command: my_command}\n"
- cfg = yaml.safe_load(yaml_cfg)
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(data['agent_invoked'], cfg['agent_command'])
-
@mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
return_value=None)
def test_network_config_set_from_imds(self, m_driver):
@@ -1205,29 +1178,6 @@ scbus-1 on xpt0 bus 0
dsrc.get_data()
self.assertEqual('eastus2', dsrc.region)
- def test_user_cfg_set_agent_command(self):
- # set dscfg in via base64 encoded yaml
- cfg = {'agent_command': "my_command"}
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'dscfg': {'text': b64e(yaml.dump(cfg)),
- 'encoding': 'base64'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(data['agent_invoked'], cfg['agent_command'])
-
- def test_sys_cfg_set_agent_command(self):
- sys_cfg = {'datasource': {'Azure': {'agent_command': '_COMMAND'}}}
- data = {'ovfcontent': construct_valid_ovf_env(data={}),
- 'sys_cfg': sys_cfg}
-
- dsrc = self._get_ds(data)
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(data['agent_invoked'], '_COMMAND')
-
def test_sys_cfg_set_never_destroy_ntfs(self):
sys_cfg = {'datasource': {'Azure': {
'never_destroy_ntfs': 'user-supplied-value'}}}
@@ -1311,51 +1261,6 @@ scbus-1 on xpt0 bus 0
self.assertTrue(ret)
self.assertEqual(dsrc.userdata_raw, mydata.encode('utf-8'))
- def test_cfg_has_pubkeys_fingerprint(self):
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
- data = {'ovfcontent': construct_valid_ovf_env(data=odata,
- pubkeys=pubkeys)}
-
- dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- for mypk in mypklist:
- self.assertIn(mypk, dsrc.cfg['_pubkeys'])
- self.assertIn('pubkey_from', dsrc.metadata['public-keys'][-1])
-
- def test_cfg_has_pubkeys_value(self):
- # make sure that provided key is used over fingerprint
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': 'value1'}]
- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
- data = {'ovfcontent': construct_valid_ovf_env(data=odata,
- pubkeys=pubkeys)}
-
- dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
-
- for mypk in mypklist:
- self.assertIn(mypk, dsrc.cfg['_pubkeys'])
- self.assertIn(mypk['value'], dsrc.metadata['public-keys'])
-
- def test_cfg_has_no_fingerprint_has_value(self):
- # test value is used when fingerprint not provided
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- mypklist = [{'fingerprint': None, 'path': 'path1', 'value': 'value1'}]
- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
- data = {'ovfcontent': construct_valid_ovf_env(data=odata,
- pubkeys=pubkeys)}
-
- dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
-
- for mypk in mypklist:
- self.assertIn(mypk['value'], dsrc.metadata['public-keys'])
-
def test_default_ephemeral_configs_ephemeral_exists(self):
# make sure the ephemeral configs are correct if disk present
odata = {}
@@ -1856,8 +1761,41 @@ scbus-1 on xpt0 bus 0
dsrc.get_data()
dsrc.setup(True)
ssh_keys = dsrc.get_public_ssh_keys()
- # Temporarily alter this test so that SSH public keys
- # from IMDS are *not* going to be in use to fix a regression.
+ self.assertEqual(ssh_keys, ["ssh-rsa key1"])
+ self.assertEqual(m_parse_certificates.call_count, 0)
+
+ def test_key_without_crlf_valid(self):
+ test_key = 'ssh-rsa somerandomkeystuff some comment'
+ assert True is dsaz._key_is_openssh_formatted(test_key)
+
+ def test_key_with_crlf_invalid(self):
+ test_key = 'ssh-rsa someran\r\ndomkeystuff some comment'
+ assert False is dsaz._key_is_openssh_formatted(test_key)
+
+ def test_key_endswith_crlf_valid(self):
+ test_key = 'ssh-rsa somerandomkeystuff some comment\r\n'
+ assert True is dsaz._key_is_openssh_formatted(test_key)
+
+ @mock.patch(
+ 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates')
+ @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ def test_get_public_ssh_keys_with_no_openssh_format(
+ self,
+ m_get_metadata_from_imds,
+ m_parse_certificates):
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data['compute']['publicKeys'][0]['keyData'] = 'no-openssh-format'
+ m_get_metadata_from_imds.return_value = imds_data
+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {
+ 'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': sys_cfg
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ dsrc.setup(True)
+ ssh_keys = dsrc.get_public_ssh_keys()
self.assertEqual(ssh_keys, [])
self.assertEqual(m_parse_certificates.call_count, 0)
@@ -1913,6 +1851,116 @@ scbus-1 on xpt0 bus 0
self.assertIsNotNone(dsrc.metadata)
self.assertFalse(dsrc.failed_desired_api_version)
+ @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ def test_hostname_from_imds(self, m_get_metadata_from_imds):
+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {
+ 'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': sys_cfg
+ }
+ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
+ imds_data_with_os_profile["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true"
+ )
+ m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(dsrc.metadata["local-hostname"], "hostname1")
+
+ @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ def test_username_from_imds(self, m_get_metadata_from_imds):
+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {
+ 'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': sys_cfg
+ }
+ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
+ imds_data_with_os_profile["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true"
+ )
+ m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(
+ dsrc.cfg["system_info"]["default_user"]["name"],
+ "username1"
+ )
+
+ @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ def test_disable_password_from_imds(self, m_get_metadata_from_imds):
+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {
+ 'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': sys_cfg
+ }
+ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
+ imds_data_with_os_profile["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true"
+ )
+ m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertTrue(dsrc.metadata["disable_password"])
+
+ @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ def test_userdata_from_imds(self, m_get_metadata_from_imds):
+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {
+ 'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': sys_cfg
+ }
+ userdata = "userdataImds"
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true",
+ )
+ imds_data["compute"]["userData"] = b64e(userdata)
+ m_get_metadata_from_imds.return_value = imds_data
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, userdata.encode('utf-8'))
+
+ @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ def test_userdata_from_imds_with_customdata_from_OVF(
+ self, m_get_metadata_from_imds):
+ userdataOVF = "userdataOVF"
+ odata = {
+ 'HostName': "myhost", 'UserName': "myuser",
+ 'UserData': {'text': b64e(userdataOVF), 'encoding': 'base64'}
+ }
+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ data = {
+ 'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': sys_cfg
+ }
+
+ userdataImds = "userdataImds"
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true",
+ )
+ imds_data["compute"]["userData"] = b64e(userdataImds)
+ m_get_metadata_from_imds.return_value = imds_data
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, userdataOVF.encode('utf-8'))
+
class TestAzureBounce(CiTestCase):
@@ -1920,8 +1968,6 @@ class TestAzureBounce(CiTestCase):
def mock_out_azure_moving_parts(self):
self.patches.enter_context(
- mock.patch.object(dsaz, 'invoke_agent'))
- self.patches.enter_context(
mock.patch.object(dsaz.util, 'wait_for_files'))
self.patches.enter_context(
mock.patch.object(dsaz, 'list_possible_azure_ds_devs',
@@ -2710,15 +2756,22 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
def nic_attach_ret(nl_sock, nics_found):
nonlocal m_attach_call_count
- if m_attach_call_count == 0:
- m_attach_call_count = m_attach_call_count + 1
+ m_attach_call_count = m_attach_call_count + 1
+ if m_attach_call_count == 1:
return "eth0"
- return "eth1"
+ elif m_attach_call_count == 2:
+ return "eth1"
+ raise RuntimeError("Must have found primary nic by now.")
+
+ # Simulate two NICs by adding the same one twice.
+ md = {
+ "interface": [
+ IMDS_NETWORK_METADATA['interface'][0],
+ IMDS_NETWORK_METADATA['interface'][0]
+ ]
+ }
- def network_metadata_ret(ifname, retries, type):
- # Simulate two NICs by adding the same one twice.
- md = IMDS_NETWORK_METADATA
- md['interface'].append(md['interface'][0])
+ def network_metadata_ret(ifname, retries, type, exc_cb, infinite):
if ifname == "eth0":
return md
raise requests.Timeout('Fake connection timeout')
@@ -2740,6 +2793,72 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
self.assertEqual(1, m_imds.call_count)
self.assertEqual(2, m_link_up.call_count)
+ @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback')
+ @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
+ def test_check_if_nic_is_primary_retries_on_failures(
+ self, m_dhcpv4, m_imds):
+ """Retry polling for network metadata on all failures except timeout"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ lease = {
+ 'interface': 'eth9', 'fixed-address': '192.168.2.9',
+ 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+ 'unknown-245': '624c3620'}
+
+ eth0Retries = []
+ eth1Retries = []
+ # Simulate two NICs by adding the same one twice.
+ md = {
+ "interface": [
+ IMDS_NETWORK_METADATA['interface'][0],
+ IMDS_NETWORK_METADATA['interface'][0]
+ ]
+ }
+
+ def network_metadata_ret(ifname, retries, type, exc_cb, infinite):
+ nonlocal eth0Retries, eth1Retries
+
+ # Simulate readurl functionality with retries and
+ # exception callbacks so that the callback logic can be
+ # validated.
+ if ifname == "eth0":
+ cause = requests.HTTPError()
+ for _ in range(0, 15):
+ error = url_helper.UrlError(cause=cause, code=410)
+ eth0Retries.append(exc_cb("No goal state.", error))
+ else:
+ cause = requests.Timeout('Fake connection timeout')
+ for _ in range(0, 10):
+ error = url_helper.UrlError(cause=cause)
+ eth1Retries.append(exc_cb("Connection timeout", error))
+ # Should stop retrying after 10 retries
+ eth1Retries.append(exc_cb("Connection timeout", error))
+ raise cause
+ return md
+
+ m_imds.side_effect = network_metadata_ret
+
+ dhcp_ctx = mock.MagicMock(lease=lease)
+ dhcp_ctx.obtain_lease.return_value = lease
+ m_dhcpv4.return_value = dhcp_ctx
+
+ is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth0")
+ self.assertEqual(True, is_primary)
+ self.assertEqual(2, expected_nic_count)
+
+        # All eth0 errors are non-timeout errors, so we should have
+        # retried indefinitely until success.
+ for i in eth0Retries:
+ self.assertTrue(i)
+
+ is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth1")
+ self.assertEqual(False, is_primary)
+
+        # All eth1 errors are timeout errors. Retries stop after a max of
+        # 10, after which we move on, assuming it is not the primary NIC.
+ for i in range(0, 10):
+ self.assertTrue(eth1Retries[i])
+ self.assertFalse(eth1Retries[10])
+
@mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
def test_wait_for_link_up_returns_if_already_up(
self, m_is_link_up):
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index b8899807..552c7905 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -384,6 +384,7 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
max_readurl_attempts = 240
default_readurl_timeout = 5
+ sleep_duration_between_retries = 5
periodic_logging_attempts = 12
def setUp(self):
@@ -394,8 +395,8 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
self.m_readurl = patches.enter_context(
mock.patch.object(
azure_helper.url_helper, 'readurl', mock.MagicMock()))
- patches.enter_context(
- mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
+ self.m_sleep = patches.enter_context(
+ mock.patch.object(azure_helper.time, 'sleep', autospec=True))
def test_http_with_retries(self):
self.m_readurl.return_value = 'TestResp'
@@ -438,6 +439,12 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
self.m_readurl.call_count,
self.periodic_logging_attempts + 1)
+ # Ensure that cloud-init did sleep between each failed request
+ self.assertEqual(
+ self.m_sleep.call_count,
+ self.periodic_logging_attempts)
+ self.m_sleep.assert_called_with(self.sleep_duration_between_retries)
+
def test_http_with_retries_long_delay_logs_periodic_failure_msg(self):
self.m_readurl.side_effect = \
[SentinelException] * self.periodic_logging_attempts + \
@@ -1002,6 +1009,14 @@ class TestWALinuxAgentShim(CiTestCase):
self.GoalState.return_value.container_id = self.test_container_id
self.GoalState.return_value.instance_id = self.test_instance_id
+ def test_eject_iso_is_called(self):
+ shim = wa_shim()
+ with mock.patch.object(
+ shim, 'eject_iso', autospec=True
+ ) as m_eject_iso:
+ shim.register_with_azure_and_fetch_data(iso_dev="/dev/sr0")
+ m_eject_iso.assert_called_once_with("/dev/sr0")
+
def test_http_client_does_not_use_certificate_for_report_ready(self):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
@@ -1276,13 +1291,14 @@ class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase):
def test_calls_shim_register_with_azure_and_fetch_data(self):
m_pubkey_info = mock.MagicMock()
- azure_helper.get_metadata_from_fabric(pubkey_info=m_pubkey_info)
+ azure_helper.get_metadata_from_fabric(
+ pubkey_info=m_pubkey_info, iso_dev="/dev/sr0")
self.assertEqual(
1,
self.m_shim.return_value
.register_with_azure_and_fetch_data.call_count)
self.assertEqual(
- mock.call(pubkey_info=m_pubkey_info),
+ mock.call(iso_dev="/dev/sr0", pubkey_info=m_pubkey_info),
self.m_shim.return_value
.register_with_azure_and_fetch_data.call_args)
diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py
index 5912f7ee..5e9c547a 100644
--- a/tests/unittests/test_datasource/test_common.py
+++ b/tests/unittests/test_datasource/test_common.py
@@ -28,6 +28,7 @@ from cloudinit.sources import (
DataSourceScaleway as Scaleway,
DataSourceSmartOS as SmartOS,
DataSourceUpCloud as UpCloud,
+ DataSourceVultr as Vultr,
)
from cloudinit.sources import DataSourceNone as DSNone
@@ -45,6 +46,7 @@ DEFAULT_LOCAL = [
Oracle.DataSourceOracle,
OVF.DataSourceOVF,
SmartOS.DataSourceSmartOS,
+ Vultr.DataSourceVultr,
Ec2.DataSourceEc2Local,
OpenStack.DataSourceOpenStackLocal,
RbxCloud.DataSourceRbxCloud,
diff --git a/tests/unittests/test_datasource/test_vultr.py b/tests/unittests/test_datasource/test_vultr.py
new file mode 100644
index 00000000..bbea2aa3
--- /dev/null
+++ b/tests/unittests/test_datasource/test_vultr.py
@@ -0,0 +1,343 @@
+# Author: Eric Benner <ebenner@vultr.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# Vultr Metadata API:
+# https://www.vultr.com/metadata/
+
+import json
+
+from cloudinit import helpers
+from cloudinit import settings
+from cloudinit.sources import DataSourceVultr
+from cloudinit.sources.helpers import vultr
+
+from cloudinit.tests.helpers import mock, CiTestCase
+
+# Vultr metadata test data
+VULTR_V1_1 = {
+ 'bgp': {
+ 'ipv4': {
+ 'my-address': '',
+ 'my-asn': '',
+ 'peer-address': '',
+ 'peer-asn': ''
+ },
+ 'ipv6': {
+ 'my-address': '',
+ 'my-asn': '',
+ 'peer-address': '',
+ 'peer-asn': ''
+ }
+ },
+ 'hostname': 'CLOUDINIT_1',
+ 'instanceid': '42506325',
+ 'interfaces': [
+ {
+ 'ipv4': {
+ 'additional': [
+ ],
+ 'address': '108.61.89.242',
+ 'gateway': '108.61.89.1',
+ 'netmask': '255.255.255.0'
+ },
+ 'ipv6': {
+ 'additional': [
+ ],
+ 'address': '2001:19f0:5:56c2:5400:03ff:fe15:c465',
+ 'network': '2001:19f0:5:56c2::',
+ 'prefix': '64'
+ },
+ 'mac': '56:00:03:15:c4:65',
+ 'network-type': 'public'
+ }
+ ],
+ 'public-keys': [
+ 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key'
+ ],
+ 'region': {
+ 'regioncode': 'EWR'
+ },
+ 'user-defined': [
+ ],
+ 'startup-script': 'echo No configured startup script',
+ 'raid1-script': '',
+ 'user-data': [
+ ],
+ 'vendor-data': {
+ 'vendor-script': '',
+ 'ethtool-script': '',
+ 'config': {
+ 'package_upgrade': 'true',
+ 'disable_root': 0,
+ 'ssh_pwauth': 1,
+ 'chpasswd': {
+ 'expire': False,
+ 'list': [
+ 'root:$6$S2Smuj.../VqxmIR9Urw0jPZ88i4yvB/'
+ ]
+ },
+ 'system_info': {
+ 'default_user': {
+ 'name': 'root'
+ }
+ }
+ }
+ }
+}
+
+VULTR_V1_2 = {
+ 'bgp': {
+ 'ipv4': {
+ 'my-address': '',
+ 'my-asn': '',
+ 'peer-address': '',
+ 'peer-asn': ''
+ },
+ 'ipv6': {
+ 'my-address': '',
+ 'my-asn': '',
+ 'peer-address': '',
+ 'peer-asn': ''
+ }
+ },
+ 'hostname': 'CLOUDINIT_2',
+ 'instance-v2-id': '29bea708-2e6e-480a-90ad-0e6b5d5ad62f',
+ 'instanceid': '42872224',
+ 'interfaces': [
+ {
+ 'ipv4': {
+ 'additional': [
+ ],
+                'address': '45.76.7.171',
+                'gateway': '45.76.6.1',
+                'netmask': '255.255.254.0'
+            },
+            'ipv6': {
+                'additional': [
+                ],
+                'address': '2001:19f0:5:28a7:5400:03ff:fe1b:4eca',
+                'network': '2001:19f0:5:28a7::',
+                'prefix': '64'
+            },
+            'mac': '56:00:03:1b:4e:ca',
+            'network-type': 'public'
+        },
+        {
+            'ipv4': {
+                'additional': [
+                ],
+                'address': '10.1.112.3',
+                'gateway': '',
+                'netmask': '255.255.240.0'
+            },
+            'ipv6': {
+                'additional': [
+                ],
+                'network': '',
+                'prefix': ''
+            },
+            'mac': '5a:00:03:1b:4e:ca',
+            'network-type': 'private',
+            'network-v2-id': 'fbbe2b5b-b986-4396-87f5-7246660ccb64',
+            'networkid': 'net5e7155329d730'
+ }
+ ],
+ 'public-keys': [
+ 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key'
+ ],
+ 'region': {
+ 'regioncode': 'EWR'
+ },
+ 'user-defined': [
+ ],
+ 'startup-script': 'echo No configured startup script',
+ 'user-data': [
+ ],
+
+ 'vendor-data': {
+ 'vendor-script': '',
+ 'ethtool-script': '',
+ 'raid1-script': '',
+ 'config': {
+ 'package_upgrade': 'true',
+ 'disable_root': 0,
+ 'ssh_pwauth': 1,
+ 'chpasswd': {
+ 'expire': False,
+ 'list': [
+ 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1'
+ ]
+ },
+ 'system_info': {
+ 'default_user': {
+ 'name': 'root'
+ }
+ }
+ }
+ }
+}
+
+SSH_KEYS_1 = [
+ "ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"
+]
+
+# Expected generated objects
+
+# Expected config
+EXPECTED_VULTR_CONFIG = {
+ 'package_upgrade': 'true',
+ 'disable_root': 0,
+ 'ssh_pwauth': 1,
+ 'chpasswd': {
+ 'expire': False,
+ 'list': [
+ 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1'
+ ]
+ },
+ 'system_info': {
+ 'default_user': {
+ 'name': 'root'
+ }
+ }
+}
+
+# Expected network config object from generator
+EXPECTED_VULTR_NETWORK_1 = {
+ 'version': 1,
+ 'config': [
+ {
+ 'type': 'nameserver',
+ 'address': ['108.61.10.10']
+ },
+ {
+ 'name': 'eth0',
+ 'type': 'physical',
+ 'mac_address': '56:00:03:15:c4:65',
+ 'accept-ra': 1,
+ 'subnets': [
+ {'type': 'dhcp', 'control': 'auto'},
+ {'type': 'dhcp6', 'control': 'auto'}
+ ],
+ }
+ ]
+}
+
+EXPECTED_VULTR_NETWORK_2 = {
+ 'version': 1,
+ 'config': [
+ {
+ 'type': 'nameserver',
+ 'address': ['108.61.10.10']
+ },
+ {
+ 'name': 'eth0',
+ 'type': 'physical',
+ 'mac_address': '56:00:03:1b:4e:ca',
+ 'accept-ra': 1,
+ 'subnets': [
+ {'type': 'dhcp', 'control': 'auto'},
+ {'type': 'dhcp6', 'control': 'auto'}
+ ],
+ },
+ {
+ 'name': 'eth1',
+ 'type': 'physical',
+ 'mac_address': '5a:00:03:1b:4e:ca',
+ 'accept-ra': 1,
+ 'subnets': [
+ {
+ "type": "static",
+ "control": "auto",
+ "address": "10.1.112.3",
+ "netmask": "255.255.240.0"
+ }
+ ],
+ }
+ ]
+}
+
+
+INTERFACE_MAP = {
+ '56:00:03:15:c4:65': 'eth0',
+ '56:00:03:1b:4e:ca': 'eth0',
+ '5a:00:03:1b:4e:ca': 'eth1'
+}
+
+
+class TestDataSourceVultr(CiTestCase):
+ def setUp(self):
+ super(TestDataSourceVultr, self).setUp()
+
+ # Stored as a dict to make it easier to maintain
+ raw1 = json.dumps(VULTR_V1_1['vendor-data']['config'])
+ raw2 = json.dumps(VULTR_V1_2['vendor-data']['config'])
+
+        # Convert to the JSON string format the metadata provides
+ VULTR_V1_1['vendor-data']['config'] = raw1
+ VULTR_V1_2['vendor-data']['config'] = raw2
+
+ self.tmp = self.tmp_dir()
+
+ # Test the datasource itself
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch('cloudinit.sources.helpers.vultr.is_vultr')
+ @mock.patch('cloudinit.sources.helpers.vultr.get_metadata')
+ def test_datasource(self,
+ mock_getmeta,
+ mock_isvultr,
+ mock_netmap):
+ mock_getmeta.return_value = VULTR_V1_2
+ mock_isvultr.return_value = True
+ mock_netmap.return_value = INTERFACE_MAP
+
+ source = DataSourceVultr.DataSourceVultr(
+ settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+
+        # Test that data is fetched successfully
+ self.assertEqual(True, source._get_data())
+
+ # Test instance id
+ self.assertEqual("42872224", source.metadata['instanceid'])
+
+ # Test hostname
+ self.assertEqual("CLOUDINIT_2", source.metadata['local-hostname'])
+
+ # Test ssh keys
+ self.assertEqual(SSH_KEYS_1, source.metadata['public-keys'])
+
+ # Test vendor data generation
+ orig_val = self.maxDiff
+ self.maxDiff = None
+
+ vendordata = source.vendordata_raw
+
+ # Test vendor config
+ self.assertEqual(
+ EXPECTED_VULTR_CONFIG,
+ json.loads(vendordata[0].replace("#cloud-config", "")))
+
+ self.maxDiff = orig_val
+
+ # Test network config generation
+ self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config)
+
+ # Test network config generation
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ def test_network_config(self, mock_netmap):
+ mock_netmap.return_value = INTERFACE_MAP
+ interf = VULTR_V1_1['interfaces']
+
+ self.assertEqual(EXPECTED_VULTR_NETWORK_1,
+ vultr.generate_network_config(interf))
+
+ # Test Private Networking config generation
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ def test_private_network_config(self, mock_netmap):
+ mock_netmap.return_value = INTERFACE_MAP
+ interf = VULTR_V1_2['interfaces']
+
+ self.assertEqual(EXPECTED_VULTR_NETWORK_2,
+ vultr.generate_network_config(interf))
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py
index 58abf51a..73641b70 100644
--- a/tests/unittests/test_handler/test_handler_set_hostname.py
+++ b/tests/unittests/test_handler/test_handler_set_hostname.py
@@ -15,6 +15,7 @@ import os
import shutil
import tempfile
from io import BytesIO
+from unittest import mock
LOG = logging.getLogger(__name__)
@@ -29,14 +30,53 @@ class TestHostname(t_help.FilesystemMockingTestCase):
util.ensure_dir(os.path.join(self.tmp, 'data'))
self.addCleanup(shutil.rmtree, self.tmp)
- def _fetch_distro(self, kind):
+ def _fetch_distro(self, kind, conf=None):
cls = distros.fetch(kind)
paths = helpers.Paths({'cloud_dir': self.tmp})
- return cls(kind, {}, paths)
+ conf = {} if conf is None else conf
+ return cls(kind, conf, paths)
- def test_write_hostname_rhel(self):
+ def test_debian_write_hostname_prefer_fqdn(self):
cfg = {
- 'hostname': 'blah.blah.blah.yahoo.com',
+ 'hostname': 'blah',
+ 'prefer_fqdn_over_hostname': True,
+ 'fqdn': 'blah.yahoo.com',
+ }
+ distro = self._fetch_distro('debian', cfg)
+ paths = helpers.Paths({'cloud_dir': self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_set_hostname.handle('cc_set_hostname',
+ cfg, cc, LOG, [])
+ contents = util.load_file("/etc/hostname")
+ self.assertEqual('blah.yahoo.com', contents.strip())
+
+ @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False)
+ def test_rhel_write_hostname_prefer_hostname(self, m_uses_systemd):
+ cfg = {
+ 'hostname': 'blah',
+ 'prefer_fqdn_over_hostname': False,
+ 'fqdn': 'blah.yahoo.com',
+ }
+ distro = self._fetch_distro('rhel', cfg)
+ paths = helpers.Paths({'cloud_dir': self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_set_hostname.handle('cc_set_hostname',
+ cfg, cc, LOG, [])
+ contents = util.load_file("/etc/sysconfig/network", decode=False)
+ n_cfg = ConfigObj(BytesIO(contents))
+ self.assertEqual(
+ {'HOSTNAME': 'blah'},
+ dict(n_cfg))
+
+ @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False)
+ def test_write_hostname_rhel(self, m_uses_systemd):
+ cfg = {
+ 'hostname': 'blah',
+ 'fqdn': 'blah.blah.blah.yahoo.com'
}
distro = self._fetch_distro('rhel')
paths = helpers.Paths({'cloud_dir': self.tmp})
@@ -45,15 +85,16 @@ class TestHostname(t_help.FilesystemMockingTestCase):
self.patchUtils(self.tmp)
cc_set_hostname.handle('cc_set_hostname',
cfg, cc, LOG, [])
- if not distro.uses_systemd():
- contents = util.load_file("/etc/sysconfig/network", decode=False)
- n_cfg = ConfigObj(BytesIO(contents))
- self.assertEqual({'HOSTNAME': 'blah.blah.blah.yahoo.com'},
- dict(n_cfg))
+ contents = util.load_file("/etc/sysconfig/network", decode=False)
+ n_cfg = ConfigObj(BytesIO(contents))
+ self.assertEqual(
+ {'HOSTNAME': 'blah.blah.blah.yahoo.com'},
+ dict(n_cfg))
def test_write_hostname_debian(self):
cfg = {
- 'hostname': 'blah.blah.blah.yahoo.com',
+ 'hostname': 'blah',
+ 'fqdn': 'blah.blah.blah.yahoo.com',
}
distro = self._fetch_distro('debian')
paths = helpers.Paths({'cloud_dir': self.tmp})
@@ -65,7 +106,8 @@ class TestHostname(t_help.FilesystemMockingTestCase):
contents = util.load_file("/etc/hostname")
self.assertEqual('blah', contents.strip())
- def test_write_hostname_sles(self):
+ @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False)
+ def test_write_hostname_sles(self, m_uses_systemd):
cfg = {
'hostname': 'blah.blah.blah.suse.com',
}
@@ -75,9 +117,8 @@ class TestHostname(t_help.FilesystemMockingTestCase):
cc = cloud.Cloud(ds, paths, {}, distro, None)
self.patchUtils(self.tmp)
cc_set_hostname.handle('cc_set_hostname', cfg, cc, LOG, [])
- if not distro.uses_systemd():
- contents = util.load_file(distro.hostname_conf_fn)
- self.assertEqual('blah', contents.strip())
+ contents = util.load_file(distro.hostname_conf_fn)
+ self.assertEqual('blah', contents.strip())
def test_multiple_calls_skips_unchanged_hostname(self):
"""Only new hostname or fqdn values will generate a hostname call."""
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index cb636f41..b72a62b8 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -1654,7 +1654,7 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
'expected_sysconfig_opensuse': {
'ifcfg-bond0': textwrap.dedent("""\
BONDING_MASTER=yes
- BONDING_OPTS="mode=active-backup """
+ BONDING_MODULE_OPTS="mode=active-backup """
"""xmit_hash_policy=layer3+4 """
"""miimon=100"
BONDING_SLAVE_0=eth1
@@ -2240,7 +2240,7 @@ iface bond0 inet6 static
'expected_sysconfig_opensuse': {
'ifcfg-bond0': textwrap.dedent("""\
BONDING_MASTER=yes
- BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """
+ BONDING_MODULE_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """
"""miimon=100 num_grat_arp=5 """
"""downdelay=10 updelay=20 """
"""fail_over_mac=active """
@@ -4387,6 +4387,56 @@ class TestCmdlineKlibcNetworkConfigSource(FilesystemMockingTestCase):
)
self.assertFalse(src.is_applicable())
+ def test_with_faux_ip(self):
+ content = {'net6-eno1.conf': DHCP6_CONTENT_1}
+ files = sorted(populate_dir(self.tmp_dir(), content))
+ src = cmdline.KlibcNetworkConfigSource(
+ _files=files,
+ _cmdline='foo iscsi_target_ip=root=/dev/sda',
+ _mac_addrs=self.macs,
+ )
+ self.assertFalse(src.is_applicable())
+
+ def test_empty_cmdline(self):
+ content = {'net6-eno1.conf': DHCP6_CONTENT_1}
+ files = sorted(populate_dir(self.tmp_dir(), content))
+ src = cmdline.KlibcNetworkConfigSource(
+ _files=files,
+ _cmdline='',
+ _mac_addrs=self.macs,
+ )
+ self.assertFalse(src.is_applicable())
+
+ def test_whitespace_cmdline(self):
+ content = {'net6-eno1.conf': DHCP6_CONTENT_1}
+ files = sorted(populate_dir(self.tmp_dir(), content))
+ src = cmdline.KlibcNetworkConfigSource(
+ _files=files,
+ _cmdline=' ',
+ _mac_addrs=self.macs,
+ )
+ self.assertFalse(src.is_applicable())
+
+ def test_cmdline_no_lhand(self):
+ content = {'net6-eno1.conf': DHCP6_CONTENT_1}
+ files = sorted(populate_dir(self.tmp_dir(), content))
+ src = cmdline.KlibcNetworkConfigSource(
+ _files=files,
+ _cmdline='=wut',
+ _mac_addrs=self.macs,
+ )
+ self.assertFalse(src.is_applicable())
+
+ def test_cmdline_embedded_ip(self):
+ content = {'net6-eno1.conf': DHCP6_CONTENT_1}
+ files = sorted(populate_dir(self.tmp_dir(), content))
+ src = cmdline.KlibcNetworkConfigSource(
+ _files=files,
+ _cmdline='opt="some things and ip=foo"',
+ _mac_addrs=self.macs,
+ )
+ self.assertFalse(src.is_applicable())
+
def test_with_both_ip_ip6(self):
content = {
'/run/net-eth0.conf': DHCP_CONTENT_1,
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index 5c57acac..48995057 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -3,19 +3,24 @@ ajmyyra
AlexBaranowski
Aman306
andrewbogott
+andrewlukoshko
antonyc
aswinrajamannar
beezly
bipinbachhao
BirknerAlex
+bmhughes
candlerb
cawamata
dankenigsberg
+ddymko
dermotbradley
dhensby
eandersson
eb3095
emmanuelthome
+giggsoff
+hamalq
izzyleung
johnsonshi
jordimassaguerpla
@@ -29,6 +34,7 @@ manuelisimo
marlluslustosa
matthewruffell
mitechie
+nicolasbock
nishigori
olivierlemasle
omBratteng
@@ -40,9 +46,11 @@ smoser
sshedi
TheRealFalcon
taoyama
+timothegenzmer
tnt-dev
tomponline
tsanghan
+Vultaire
WebSpider
xiachen-rh
xnox
diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 21171ac6..69329cb9 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -4,6 +4,7 @@
"ahosmanmsft": "AOhassan",
"andreipoltavchenko": "pa-yourserveradmin-com",
"askon": "ask0n",
+ "b1sandmann": "B1Sandmann",
"bitfehler": "bitfehler",
"chad.smith": "blackboxsw",
"chcheng": "chengcheng-chcheng",
diff --git a/tools/ds-identify b/tools/ds-identify
index 2f2486f7..73e27c71 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -124,7 +124,7 @@ DI_DSNAME=""
# this has to match the builtin list in cloud-init, it is what will
# be searched if there is no setting found in config.
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \
-CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \
+CloudSigma CloudStack DigitalOcean Vultr AliYun Ec2 GCE OpenNebula OpenStack \
OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud"
DI_DSLIST=""
DI_MODE=""
@@ -1350,6 +1350,20 @@ dscheck_IBMCloud() {
return ${DS_NOT_FOUND}
}
+dscheck_Vultr() {
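+    # Detected via DMI system vendor "Vultr", a "vultr" kernel cmdline
+    # token, or the presence of /etc/vultr.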
+ dmi_sys_vendor_is Vultr && return $DS_FOUND
+
+ case " $DI_KERNEL_CMDLINE " in
+ *\ vultr\ *) return $DS_FOUND ;;
+ esac
+
+ if [ -f "${PATH_ROOT}/etc/vultr" ]; then
+ return $DS_FOUND
+ fi
+
+ return $DS_NOT_FOUND
+}
+
collect_info() {
read_uname_info
read_virt
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
index ed454840..f5990748 100755
--- a/tools/render-cloudcfg
+++ b/tools/render-cloudcfg
@@ -4,8 +4,8 @@ import argparse
import os
import sys
-VARIANTS = ["alpine", "amazon", "arch", "centos", "debian", "fedora",
- "freebsd", "netbsd", "openbsd", "rhel", "suse", "ubuntu",
+VARIANTS = ["almalinux", "alpine", "amazon", "arch", "centos", "debian",
+ "fedora", "freebsd", "netbsd", "openbsd", "rhel", "suse", "ubuntu",
"unknown"]
diff --git a/tools/write-ssh-key-fingerprints b/tools/write-ssh-key-fingerprints
index 2a3dca7c..9409257d 100755
--- a/tools/write-ssh-key-fingerprints
+++ b/tools/write-ssh-key-fingerprints
@@ -1,39 +1,61 @@
#!/bin/sh
# This file is part of cloud-init. See LICENSE file for license information.
-logger_opts="-p user.info -t ec2"
-# rhels' version of logger_opts does not support long
-# for of -s (--stderr), so use short form.
-logger_opts="$logger_opts -s"
+do_syslog() {
+ log_message=$1
+
+    # rhel's version of logger does not support the long
+    # form of -s (--stderr), so use the short form.
+ logger_opts="-s"
+
+ # Need to end the options list with "--" to ensure that any minus symbols
+ # in the text passed to logger are not interpreted as logger options.
+ logger_opts="$logger_opts -p user.info -t cloud-init --"
+
+    # shellcheck disable=SC2086 # logger errors if $logger_opts is quoted
+ logger $logger_opts "$log_message"
+}
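+# Example: do_syslog "msg" logs "msg" at user.info, tagged "cloud-init",
+# and also echoes it to stderr via logger's -s option.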
+
# Redirect stderr to stdout
exec 2>&1
fp_blist=",${1},"
key_blist=",${2},"
-{
-echo
-echo "#############################################################"
-echo "-----BEGIN SSH HOST KEY FINGERPRINTS-----"
+
+fingerprint_header_shown=0
for f in /etc/ssh/ssh_host_*key.pub; do
[ -f "$f" ] || continue
- read ktype line < "$f"
+ # shellcheck disable=SC2034 # Unused "line" required for word splitting
+ read -r ktype line < "$f"
# skip the key if its type is in the blacklist
[ "${fp_blist#*,$ktype,}" = "${fp_blist}" ] || continue
- ssh-keygen -l -f "$f"
+ if [ $fingerprint_header_shown -eq 0 ]; then
+ do_syslog "#############################################################"
+ do_syslog "-----BEGIN SSH HOST KEY FINGERPRINTS-----"
+ fingerprint_header_shown=1
+ fi
+ do_syslog "$(ssh-keygen -l -f "$f")"
done
-echo "-----END SSH HOST KEY FINGERPRINTS-----"
-echo "#############################################################"
-
-} | logger $logger_opts
+if [ $fingerprint_header_shown -eq 1 ]; then
+ do_syslog "-----END SSH HOST KEY FINGERPRINTS-----"
+ do_syslog "#############################################################"
+fi
-echo "-----BEGIN SSH HOST KEY KEYS-----"
+key_header_shown=0
for f in /etc/ssh/ssh_host_*key.pub; do
[ -f "$f" ] || continue
- read ktype line < "$f"
+ # shellcheck disable=SC2034 # Unused "line" required for word splitting
+ read -r ktype line < "$f"
# skip the key if its type is in the blacklist
[ "${key_blist#*,$ktype,}" = "${key_blist}" ] || continue
- cat $f
+ if [ $key_header_shown -eq 0 ]; then
+ echo "-----BEGIN SSH HOST KEY KEYS-----"
+ key_header_shown=1
+ fi
+ cat "$f"
done
-echo "-----END SSH HOST KEY KEYS-----"
+if [ $key_header_shown -eq 1 ]; then
+ echo "-----END SSH HOST KEY KEYS-----"
+fi
diff --git a/tox.ini b/tox.ini
index 3158ebd5..bf8cb78b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -153,7 +153,7 @@ deps =
[testenv:integration-tests-ci]
commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests}
-passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_*
+passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_* TRAVIS
deps =
-r{toxinidir}/integration-requirements.txt
setenv =