author     James Falcon <james.falcon@canonical.com>  2021-08-13 15:37:31 -0500
committer  git-ubuntu importer <ubuntu-devel-discuss@lists.ubuntu.com>  2021-08-13 21:39:10 +0000
commit     4559ea73d29f34098c4ee8c62f9046548bb1ae87 (patch)
tree       d9d155465a841dded7ca07879b7f787821a943d4
parent     a85ca5b85b69bb228b136c1dba23e3a289115b5c (diff)
download   cloud-init-git-4559ea73d29f34098c4ee8c62f9046548bb1ae87.tar.gz
21.2-69-g65607405-0ubuntu1 (patches unapplied)
Imported using git-ubuntu import.
-rw-r--r--  README.md                                                  |    2
-rw-r--r--  cloudinit/cmd/devel/hotplug_hook.py                        |  123
-rw-r--r--  cloudinit/cmd/main.py                                      |    2
-rw-r--r--  cloudinit/config/cc_disk_setup.py                          |   13
-rw-r--r--  cloudinit/config/cc_mounts.py                              |   17
-rw-r--r--  cloudinit/config/cc_ntp.py                                 |    9
-rw-r--r--  cloudinit/config/cc_puppet.py                              |  159
-rw-r--r--  cloudinit/config/cc_resolv_conf.py                         |    7
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py                        |    6
-rwxr-xr-x  cloudinit/distros/__init__.py                              |    6
-rw-r--r--  cloudinit/distros/alpine.py                                |    8
-rw-r--r--  cloudinit/distros/arch.py                                  |    8
-rw-r--r--  cloudinit/distros/debian.py                                |    8
-rw-r--r--  cloudinit/distros/eurolinux.py                             |    9
-rw-r--r--  cloudinit/distros/gentoo.py                                |    8
-rw-r--r--  cloudinit/distros/opensuse.py                              |   10
-rw-r--r--  cloudinit/distros/photon.py                                |   73
-rw-r--r--  cloudinit/distros/rhel.py                                  |    8
-rw-r--r--  cloudinit/distros/virtuozzo.py                             |    9
-rw-r--r--  cloudinit/net/__init__.py                                  |   12
-rw-r--r--  cloudinit/net/activators.py                                |   27
-rw-r--r--  cloudinit/net/networkd.py                                  |    6
-rw-r--r--  cloudinit/net/sysconfig.py                                 |    3
-rw-r--r--  cloudinit/patcher.py                                       |   11
-rw-r--r--  cloudinit/settings.py                                      |    1
-rwxr-xr-x  cloudinit/sources/DataSourceAzure.py                       |  296
-rw-r--r--  cloudinit/sources/DataSourceOVF.py                         |   14
-rw-r--r--  cloudinit/sources/DataSourceVMware.py                      |  871
-rw-r--r--  cloudinit/sources/__init__.py                              |   18
-rwxr-xr-x  cloudinit/sources/helpers/azure.py                         |   34
-rw-r--r--  cloudinit/ssh_util.py                                      |  133
-rw-r--r--  cloudinit/tests/.test_util.py.swp                          |  bin 16384 -> 0 bytes
-rw-r--r--  cloudinit/tests/test_util.py                               |   98
-rw-r--r--  cloudinit/util.py                                          |   70
-rw-r--r--  config/cloud.cfg.tmpl                                      |   22
-rw-r--r--  debian/changelog                                           |   43
-rw-r--r--  debian/cloud-init.templates                                |    6
-rw-r--r--  debian/control                                             |    2
-rw-r--r--  debian/po/templates.pot                                    |    6
-rw-r--r--  doc/examples/cloud-config-puppet.txt                       |   60
-rw-r--r--  doc/rtd/topics/availability.rst                            |    4
-rw-r--r--  doc/rtd/topics/datasources.rst                             |    1
-rw-r--r--  doc/rtd/topics/datasources/vmware.rst                      |  359
-rw-r--r--  doc/rtd/topics/events.rst                                  |    8
-rw-r--r--  doc/rtd/topics/network-config.rst                          |    7
-rw-r--r--  doc/sources/ovf/example/ovf-env.xml                        |    8
-rw-r--r--  packages/pkg-deps.json                                     |   14
-rw-r--r--  requirements.txt                                           |    9
-rw-r--r--  systemd/cloud-init-generator.tmpl                          |    3
-rw-r--r--  systemd/cloud-init.service.tmpl                            |    3
-rw-r--r--  tests/cloud_tests/testcases/examples/setup_run_puppet.yaml |   10
-rw-r--r--  tests/integration_tests/bugs/test_lp1920939.py             |  140
-rw-r--r--  tests/integration_tests/modules/test_combined.py           |  175
-rw-r--r--  tests/integration_tests/modules/test_command_output.py     |   23
-rw-r--r--  tests/integration_tests/modules/test_disk_setup.py         |  192
-rw-r--r--  tests/integration_tests/modules/test_hotplug.py            |   14
-rw-r--r--  tests/integration_tests/modules/test_ntp_servers.py        |   89
-rw-r--r--  tests/integration_tests/modules/test_snap.py               |    2
-rw-r--r--  tests/integration_tests/modules/test_ssh_import_id.py      |    6
-rw-r--r--  tests/integration_tests/util.py                            |   30
-rw-r--r--  tests/unittests/test_cli.py                                |    3
-rw-r--r--  tests/unittests/test_datasource/test_azure.py              |   81
-rw-r--r--  tests/unittests/test_datasource/test_common.py             |    3
-rw-r--r--  tests/unittests/test_datasource/test_ovf.py                |   97
-rw-r--r--  tests/unittests/test_datasource/test_vmware.py             |  377
-rw-r--r--  tests/unittests/test_distros/test_create_users.py          |    2
-rw-r--r--  tests/unittests/test_distros/test_photon.py                |   68
-rw-r--r--  tests/unittests/test_ds_identify.py                        |  279
-rw-r--r--  tests/unittests/test_handler/test_handler_mounts.py        |    9
-rw-r--r--  tests/unittests/test_handler/test_handler_puppet.py        |  236
-rw-r--r--  tests/unittests/test_handler/test_handler_resolv_conf.py   |  105
-rw-r--r--  tests/unittests/test_handler/test_handler_set_hostname.py  |   26
-rw-r--r--  tests/unittests/test_net.py                                |    1
-rw-r--r--  tests/unittests/test_net_activators.py                     |   27
-rw-r--r--  tests/unittests/test_render_cloudcfg.py                    |    6
-rw-r--r--  tests/unittests/test_sshutil.py                            |  952
-rw-r--r--  tools/.github-cla-signers                                  |    5
-rwxr-xr-x  tools/ds-identify                                          |   85
-rwxr-xr-x  tools/hook-hotplug                                         |    9
-rwxr-xr-x  tools/read-dependencies                                    |    8
-rwxr-xr-x  tools/render-cloudcfg                                      |    4
-rw-r--r--  tox.ini                                                    |    2
82 files changed, 4927 insertions(+), 773 deletions(-)
diff --git a/README.md b/README.md
index 462e3204..5828c2fa 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,7 @@ get in contact with that distribution and send them our way!
| Supported OSes | Supported Public Clouds | Supported Private Clouds |
| --- | --- | --- |
-| Alpine Linux<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
+| Alpine Linux<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
## To start developing cloud-init
diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py
index 0282f24a..a0058f03 100644
--- a/cloudinit/cmd/devel/hotplug_hook.py
+++ b/cloudinit/cmd/devel/hotplug_hook.py
@@ -3,6 +3,7 @@
import abc
import argparse
import os
+import sys
import time
from cloudinit import log
@@ -12,7 +13,7 @@ from cloudinit.net import activators, read_sys_net_safe
from cloudinit.net.network_state import parse_net_config_data
from cloudinit.reporting import events
from cloudinit.stages import Init
-from cloudinit.sources import DataSource
+from cloudinit.sources import DataSource, DataSourceNotFoundException
LOG = log.getLogger(__name__)
@@ -31,15 +32,35 @@ def get_parser(parser=None):
parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
parser.description = __doc__
- parser.add_argument("-d", "--devpath", required=True,
- metavar="PATH",
- help="sysfs path to hotplugged device")
- parser.add_argument("-s", "--subsystem", required=True,
- help="subsystem to act on",
- choices=['net'])
- parser.add_argument("-u", "--udevaction", required=True,
- help="action to take",
- choices=['add', 'remove'])
+ parser.add_argument(
+ "-s", "--subsystem", required=True,
+ help="subsystem to act on",
+ choices=['net']
+ )
+
+ subparsers = parser.add_subparsers(
+ title='Hotplug Action',
+ dest='hotplug_action'
+ )
+ subparsers.required = True
+
+ subparsers.add_parser(
+ 'query',
+ help='query if hotplug is enabled for given subsystem'
+ )
+
+ parser_handle = subparsers.add_parser(
+ 'handle', help='handle the hotplug event')
+ parser_handle.add_argument(
+ "-d", "--devpath", required=True,
+ metavar="PATH",
+ help="sysfs path to hotplugged device"
+ )
+ parser_handle.add_argument(
+ "-u", "--udevaction", required=True,
+ help="action to take",
+ choices=['add', 'remove']
+ )
return parser
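
The get_parser() rework above splits the command line into a shared --subsystem flag plus 'query' and 'handle' subcommands. A minimal standalone sketch of the same argparse pattern (the parsed values below are illustrative):

    import argparse

    parser = argparse.ArgumentParser(prog='hotplug-hook')
    parser.add_argument('-s', '--subsystem', required=True, choices=['net'])

    subparsers = parser.add_subparsers(title='Hotplug Action',
                                       dest='hotplug_action')
    subparsers.required = True  # a subcommand must be given

    subparsers.add_parser('query', help='query if hotplug is enabled')
    handle = subparsers.add_parser('handle', help='handle the hotplug event')
    handle.add_argument('-d', '--devpath', required=True, metavar='PATH')
    handle.add_argument('-u', '--udevaction', required=True,
                        choices=['add', 'remove'])

    args = parser.parse_args(
        ['-s', 'net', 'handle', '-d', '/sys/class/net/eth1', '-u', 'add'])
    print(args.hotplug_action, args.devpath, args.udevaction)
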
@@ -133,27 +154,42 @@ SUBSYSTEM_PROPERTES_MAP = {
}
-def handle_hotplug(
- hotplug_init: Init, devpath, subsystem, udevaction
-):
- handler_cls, event_scope = SUBSYSTEM_PROPERTES_MAP.get(
- subsystem, (None, None)
- )
- if handler_cls is None:
+def is_enabled(hotplug_init, subsystem):
+ try:
+ scope = SUBSYSTEM_PROPERTES_MAP[subsystem][1]
+ except KeyError as e:
raise Exception(
'hotplug-hook: cannot handle events for subsystem: {}'.format(
- subsystem))
+ subsystem)
+ ) from e
+
+ return hotplug_init.update_event_enabled(
+ event_source_type=EventType.HOTPLUG,
+ scope=scope
+ )
+
+def initialize_datasource(hotplug_init, subsystem):
LOG.debug('Fetching datasource')
datasource = hotplug_init.fetch(existing="trust")
- if not hotplug_init.update_event_enabled(
- event_source_type=EventType.HOTPLUG,
- scope=EventScope.NETWORK
- ):
- LOG.debug('hotplug not enabled for event of type %s', event_scope)
+ if not datasource.get_supported_events([EventType.HOTPLUG]):
+ LOG.debug('hotplug not supported for event of type %s', subsystem)
return
+ if not is_enabled(hotplug_init, subsystem):
+ LOG.debug('hotplug not enabled for event of type %s', subsystem)
+ return
+ return datasource
+
+
+def handle_hotplug(
+ hotplug_init: Init, devpath, subsystem, udevaction
+):
+ datasource = initialize_datasource(hotplug_init, subsystem)
+ if not datasource:
+ return
+ handler_cls = SUBSYSTEM_PROPERTES_MAP[subsystem][0]
LOG.debug('Creating %s event handler', subsystem)
event_handler = handler_cls(
datasource=datasource,
@@ -200,29 +236,36 @@ def handle_args(name, args):
log.setupLogging(hotplug_init.cfg)
if 'reporting' in hotplug_init.cfg:
reporting.update_configuration(hotplug_init.cfg.get('reporting'))
-
# Logging isn't going to be setup until now
LOG.debug(
- '%s called with the following arguments: {udevaction: %s, '
- 'subsystem: %s, devpath: %s}',
- name, args.udevaction, args.subsystem, args.devpath
- )
- LOG.debug(
- '%s called with the following arguments:\n'
- 'udevaction: %s\n'
- 'subsystem: %s\n'
- 'devpath: %s',
- name, args.udevaction, args.subsystem, args.devpath
+ '%s called with the following arguments: {'
+ 'hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}',
+ name,
+ args.hotplug_action,
+ args.subsystem,
+ args.udevaction if 'udevaction' in args else None,
+ args.devpath if 'devpath' in args else None,
)
with hotplug_reporter:
try:
- handle_hotplug(
- hotplug_init=hotplug_init,
- devpath=args.devpath,
- subsystem=args.subsystem,
- udevaction=args.udevaction,
- )
+ if args.hotplug_action == 'query':
+ try:
+ datasource = initialize_datasource(
+ hotplug_init, args.subsystem)
+ except DataSourceNotFoundException:
+ print(
+ "Unable to determine hotplug state. No datasource "
+ "detected")
+ sys.exit(1)
+ print('enabled' if datasource else 'disabled')
+ else:
+ handle_hotplug(
+ hotplug_init=hotplug_init,
+ devpath=args.devpath,
+ subsystem=args.subsystem,
+ udevaction=args.udevaction,
+ )
except Exception:
LOG.exception('Received fatal exception handling hotplug!')
raise
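
The logging change above reads udevaction and devpath only when they exist, which works because argparse.Namespace supports membership tests. A tiny sketch with hypothetical values:

    from argparse import Namespace

    args = Namespace(hotplug_action='query', subsystem='net')
    print('udevaction' in args)                                # False
    print(args.udevaction if 'udevaction' in args else None)   # None
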
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 21213a4a..1de1de99 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -19,7 +19,7 @@ import time
import traceback
from cloudinit import patcher
-patcher.patch() # noqa
+patcher.patch_logging()
from cloudinit import log as logging
from cloudinit import netinfo
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 22af3813..3ec49ca5 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -125,9 +125,15 @@ def handle(_name, cfg, cloud, log, _args):
See doc/examples/cloud-config-disk-setup.txt for documentation on the
format.
"""
+ device_aliases = cfg.get("device_aliases", {})
+
+ def alias_to_device(cand):
+ name = device_aliases.get(cand)
+ return cloud.device_name_to_device(name or cand) or name
+
disk_setup = cfg.get("disk_setup")
if isinstance(disk_setup, dict):
- update_disk_setup_devices(disk_setup, cloud.device_name_to_device)
+ update_disk_setup_devices(disk_setup, alias_to_device)
log.debug("Partitioning disks: %s", str(disk_setup))
for disk, definition in disk_setup.items():
if not isinstance(definition, dict):
@@ -145,7 +151,7 @@ def handle(_name, cfg, cloud, log, _args):
fs_setup = cfg.get("fs_setup")
if isinstance(fs_setup, list):
log.debug("setting up filesystems: %s", str(fs_setup))
- update_fs_setup_devices(fs_setup, cloud.device_name_to_device)
+ update_fs_setup_devices(fs_setup, alias_to_device)
for definition in fs_setup:
if not isinstance(definition, dict):
log.warning("Invalid file system definition: %s" % definition)
@@ -174,7 +180,8 @@ def update_disk_setup_devices(disk_setup, tformer):
del disk_setup[transformed]
disk_setup[transformed] = disk_setup[origname]
- disk_setup[transformed]['_origname'] = origname
+ if isinstance(disk_setup[transformed], dict):
+ disk_setup[transformed]['_origname'] = origname
del disk_setup[origname]
LOG.debug("updated disk_setup device entry '%s' to '%s'",
origname, transformed)
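
A sketch of how the new alias_to_device() helper resolves a name; device_name_to_device below is a hypothetical stand-in for the cloud object's method:

    device_aliases = {'my_alias': '/dev/sdb'}

    def device_name_to_device(name):
        # stand-in: this datasource only knows the meta-name 'ephemeral0'
        return '/dev/sdb1' if name == 'ephemeral0' else None

    def alias_to_device(cand):
        name = device_aliases.get(cand)
        return device_name_to_device(name or cand) or name

    print(alias_to_device('my_alias'))    # '/dev/sdb' (alias value wins)
    print(alias_to_device('ephemeral0'))  # '/dev/sdb1' (datasource mapping)
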
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index c22d1698..eeb008d2 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -123,7 +123,7 @@ def _is_block_device(device_path, partition_path=None):
return os.path.exists(sys_path)
-def sanitize_devname(startname, transformer, log):
+def sanitize_devname(startname, transformer, log, aliases=None):
log.debug("Attempting to determine the real name of %s", startname)
# workaround, allow user to specify 'ephemeral'
@@ -137,9 +137,14 @@ def sanitize_devname(startname, transformer, log):
return startname
device_path, partition_number = util.expand_dotted_devname(devname)
+ orig = device_path
+
+ if aliases:
+ device_path = aliases.get(device_path, device_path)
+ if orig != device_path:
+ log.debug("Mapped device alias %s to %s", orig, device_path)
if is_meta_device_name(device_path):
- orig = device_path
device_path = transformer(device_path)
if not device_path:
return None
@@ -394,6 +399,8 @@ def handle(_name, cfg, cloud, log, _args):
fstab_devs[toks[0]] = line
fstab_lines.append(line)
+ device_aliases = cfg.get("device_aliases", {})
+
for i in range(len(cfgmnt)):
# skip something that wasn't a list
if not isinstance(cfgmnt[i], list):
@@ -402,7 +409,8 @@ def handle(_name, cfg, cloud, log, _args):
continue
start = str(cfgmnt[i][0])
- sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
+ sanitized = sanitize_devname(start, cloud.device_name_to_device, log,
+ aliases=device_aliases)
if sanitized != start:
log.debug("changed %s => %s" % (start, sanitized))
@@ -444,7 +452,8 @@ def handle(_name, cfg, cloud, log, _args):
# entry has the same device name
for defmnt in defmnts:
start = defmnt[0]
- sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
+ sanitized = sanitize_devname(start, cloud.device_name_to_device, log,
+ aliases=device_aliases)
if sanitized != start:
log.debug("changed default device %s => %s" % (start, sanitized))
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index acf3251d..7c371a49 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -24,8 +24,9 @@ LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
NTP_CONF = '/etc/ntp.conf'
NR_POOL_SERVERS = 4
-distros = ['almalinux', 'alpine', 'centos', 'debian', 'fedora', 'opensuse',
- 'photon', 'rhel', 'rocky', 'sles', 'ubuntu']
+distros = ['almalinux', 'alpine', 'centos', 'debian', 'eurolinux', 'fedora',
+ 'opensuse', 'photon', 'rhel', 'rocky', 'sles', 'ubuntu',
+ 'virtuozzo']
NTP_CLIENT_CONFIG = {
'chrony': {
@@ -405,9 +406,9 @@ def generate_server_names(distro):
# For legal reasons x.pool.sles.ntp.org does not exist,
# use the opensuse pool
pool_distro = 'opensuse'
- elif distro == 'alpine':
+ elif distro == 'alpine' or distro == 'eurolinux':
# Alpine-specific pool (i.e. x.alpine.pool.ntp.org) does not exist
- # so use general x.pool.ntp.org instead.
+ # so use general x.pool.ntp.org instead. The same applies to EuroLinux
pool_distro = ''
for x in range(0, NR_POOL_SERVERS):
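
A simplified rendition of the pool-name generation this hunk adjusts (the real module assembles the names slightly differently); EuroLinux now joins Alpine in falling back to the generic pool:

    NR_POOL_SERVERS = 4

    def generate_server_names(distro):
        pool_distro = '' if distro in ('alpine', 'eurolinux') else distro
        names = []
        for x in range(0, NR_POOL_SERVERS):
            parts = [str(x)] + ([pool_distro] if pool_distro else [])
            names.append('.'.join(parts + ['pool.ntp.org']))
        return names

    print(generate_server_names('eurolinux'))  # ['0.pool.ntp.org', ...]
    print(generate_server_names('debian'))     # ['0.debian.pool.ntp.org', ...]
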
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index bc981cf4..a0779eb0 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -29,22 +29,41 @@ The keys are ``package_name``, ``conf_file``, ``ssl_dir`` and
ones that work with puppet 3.x and with distributions that ship modified
puppet 4.x that uses the old paths.
+Agent packages from the puppetlabs repositories can be installed by setting
+``install_type`` to ``aio``. Based on this setting, the default config/SSL/CSR
+paths will be adjusted accordingly. To maintain backwards compatibility this
+setting defaults to ``packages`` which will install puppet from the distro
+packages.
+
+If installing ``aio`` packages, ``collection`` can also be set to one of
+``puppet`` (rolling release), ``puppet6``, ``puppet7`` (or their nightly
+counterparts) in order to install specific release streams. By default, the
+puppetlabs repository will be purged after installation finishes; set
+``cleanup`` to ``false`` to prevent this. AIO packages are installed through a
+shell script which is downloaded on the machine and then executed; the path to
+this script can be overridden using the ``aio_install_url`` key.
+
Puppet configuration can be specified under the ``conf`` key. The
configuration is specified as a dictionary containing high-level ``<section>``
keys and lists of ``<key>=<value>`` pairs within each section. Each section
name and ``<key>=<value>`` pair is written directly to ``puppet.conf``. As
-such, section names should be one of: ``main``, ``master``, ``agent`` or
+such, section names should be one of: ``main``, ``server``, ``agent`` or
``user`` and keys should be valid puppet configuration options. The
``certname`` key supports string substitutions for ``%i`` and ``%f``,
corresponding to the instance id and fqdn of the machine respectively.
If ``ca_cert`` is present, it will not be written to ``puppet.conf``, but
-instead will be used as the puppermaster certificate. It should be specified
+instead will be used as the puppetserver certificate. It should be specified
in pem format as a multi-line string (using the ``|`` yaml notation).
-Additionally it's possible to create a csr_attributes.yaml for
-CSR attributes and certificate extension requests.
+Additionally it's possible to create a ``csr_attributes.yaml`` file for CSR
+attributes and certificate extension requests.
See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html
+The puppet service will be automatically enabled after installation. A manual
+run can also be triggered by setting ``exec`` to ``true``, and additional
+arguments can be passed to ``puppet agent`` via the ``exec_args`` key (by
+default the agent will execute with the ``--test`` flag).
+
**Internal name:** ``cc_puppet``
**Module frequency:** per instance
@@ -56,13 +75,19 @@ See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html
puppet:
install: <true/false>
version: <version>
+ collection: <aio collection>
+ install_type: <packages/aio>
+ aio_install_url: 'https://git.io/JBhoQ'
+ cleanup: <true/false>
conf_file: '/etc/puppet/puppet.conf'
ssl_dir: '/var/lib/puppet/ssl'
csr_attributes_path: '/etc/puppet/csr_attributes.yaml'
package_name: 'puppet'
+ exec: <true/false>
+ exec_args: ['--test']
conf:
agent:
- server: "puppetmaster.example.org"
+ server: "puppetserver.example.org"
certname: "%i.%f"
ca_cert: |
-------BEGIN CERTIFICATE-------
@@ -84,12 +109,12 @@ from io import StringIO
from cloudinit import helpers
from cloudinit import subp
+from cloudinit import temp_utils
from cloudinit import util
+from cloudinit import url_helper
-PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
-PUPPET_SSL_DIR = '/var/lib/puppet/ssl'
-PUPPET_CSR_ATTRIBUTES_PATH = '/etc/puppet/csr_attributes.yaml'
-PUPPET_PACKAGE_NAME = 'puppet'
+AIO_INSTALL_URL = 'https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh' # noqa: E501
+PUPPET_AGENT_DEFAULT_ARGS = ['--test']
class PuppetConstants(object):
@@ -119,6 +144,43 @@ def _autostart_puppet(log):
" puppet services on this system"))
+def get_config_value(puppet_bin, setting):
+ """Get the config value for a given setting using `puppet config print`
+ :param puppet_bin: path to puppet binary
+ :param setting: setting to query
+ """
+ out, _ = subp.subp([puppet_bin, 'config', 'print', setting])
+ return out.rstrip()
+
+
+def install_puppet_aio(url=AIO_INSTALL_URL, version=None,
+ collection=None, cleanup=True):
+ """Install puppet-agent from the puppetlabs repositories using the one-shot
+ shell script
+
+ :param url: URL from where to download the install script
+ :param version: version to install, blank defaults to latest
+ :param collection: collection to install, blank defaults to latest
+ :param cleanup: whether to purge the puppetlabs repo after installation
+ """
+ args = []
+ if version is not None:
+ args = ['-v', version]
+ if collection is not None:
+ args += ['-c', collection]
+
+ # Purge puppetlabs repos after installation
+ if cleanup:
+ args += ['--cleanup']
+ content = url_helper.readurl(url=url, retries=5).contents
+
+ # Use tmpdir over tmpfile to avoid 'text file busy' on execute
+ with temp_utils.tempdir(needs_exe=True) as tmpd:
+ tmpf = os.path.join(tmpd, 'puppet-install')
+ util.write_file(tmpf, content, mode=0o700)
+ return subp.subp([tmpf] + args, capture=False)
+
+
def handle(name, cfg, cloud, log, _args):
# If there isn't a puppet key in the configuration don't do anything
if 'puppet' not in cfg:
@@ -130,23 +192,50 @@ def handle(name, cfg, cloud, log, _args):
# Start by installing the puppet package if necessary...
install = util.get_cfg_option_bool(puppet_cfg, 'install', True)
version = util.get_cfg_option_str(puppet_cfg, 'version', None)
- package_name = util.get_cfg_option_str(
- puppet_cfg, 'package_name', PUPPET_PACKAGE_NAME)
- conf_file = util.get_cfg_option_str(
- puppet_cfg, 'conf_file', PUPPET_CONF_PATH)
- ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir', PUPPET_SSL_DIR)
- csr_attributes_path = util.get_cfg_option_str(
- puppet_cfg, 'csr_attributes_path', PUPPET_CSR_ATTRIBUTES_PATH)
+ collection = util.get_cfg_option_str(puppet_cfg, 'collection', None)
+ install_type = util.get_cfg_option_str(
+ puppet_cfg, 'install_type', 'packages')
+ cleanup = util.get_cfg_option_bool(puppet_cfg, 'cleanup', True)
+ run = util.get_cfg_option_bool(puppet_cfg, 'exec', default=False)
+ aio_install_url = util.get_cfg_option_str(
+ puppet_cfg, 'aio_install_url', default=AIO_INSTALL_URL)
- p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log)
+ # AIO and distro packages use different paths
+ if install_type == 'aio':
+ puppet_user = 'root'
+ puppet_bin = '/opt/puppetlabs/bin/puppet'
+ puppet_package = 'puppet-agent'
+ else: # default to 'packages'
+ puppet_user = 'puppet'
+ puppet_bin = 'puppet'
+ puppet_package = 'puppet'
+
+ package_name = util.get_cfg_option_str(
+ puppet_cfg, 'package_name', puppet_package)
if not install and version:
- log.warning(("Puppet install set false but version supplied,"
+ log.warning(("Puppet install set to false but version supplied,"
" doing nothing."))
elif install:
- log.debug(("Attempting to install puppet %s,"),
- version if version else 'latest')
+ log.debug(("Attempting to install puppet %s from %s"),
+ version if version else 'latest', install_type)
- cloud.distro.install_packages((package_name, version))
+ if install_type == "packages":
+ cloud.distro.install_packages((package_name, version))
+ elif install_type == "aio":
+ install_puppet_aio(aio_install_url, version, collection, cleanup)
+ else:
+ log.warning("Unknown puppet install type '%s'", install_type)
+ run = False
+
+ conf_file = util.get_cfg_option_str(
+ puppet_cfg, 'conf_file', get_config_value(puppet_bin, 'config'))
+ ssl_dir = util.get_cfg_option_str(
+ puppet_cfg, 'ssl_dir', get_config_value(puppet_bin, 'ssldir'))
+ csr_attributes_path = util.get_cfg_option_str(
+ puppet_cfg, 'csr_attributes_path',
+ get_config_value(puppet_bin, 'csr_attributes'))
+
+ p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log)
# ... and then update the puppet configuration
if 'conf' in puppet_cfg:
@@ -165,17 +254,18 @@ def handle(name, cfg, cloud, log, _args):
source=p_constants.conf_path)
for (cfg_name, cfg) in puppet_cfg['conf'].items():
# Cert configuration is a special case
- # Dump the puppet master ca certificate in the correct place
+ # Dump the puppetserver ca certificate in the correct place
if cfg_name == 'ca_cert':
# Puppet ssl sub-directory isn't created yet
# Create it with the proper permissions and ownership
util.ensure_dir(p_constants.ssl_dir, 0o771)
- util.chownbyname(p_constants.ssl_dir, 'puppet', 'root')
+ util.chownbyname(p_constants.ssl_dir, puppet_user, 'root')
util.ensure_dir(p_constants.ssl_cert_dir)
- util.chownbyname(p_constants.ssl_cert_dir, 'puppet', 'root')
+ util.chownbyname(p_constants.ssl_cert_dir, puppet_user, 'root')
util.write_file(p_constants.ssl_cert_path, cfg)
- util.chownbyname(p_constants.ssl_cert_path, 'puppet', 'root')
+ util.chownbyname(p_constants.ssl_cert_path,
+ puppet_user, 'root')
else:
# Iterate through the config items, we'll use ConfigParser.set
# to overwrite or create new items as needed
@@ -203,6 +293,25 @@ def handle(name, cfg, cloud, log, _args):
# Set it up so it autostarts
_autostart_puppet(log)
+ # Run the agent if needed
+ if run:
+ log.debug('Running puppet-agent')
+ cmd = [puppet_bin, 'agent']
+ if 'exec_args' in puppet_cfg:
+ cmd_args = puppet_cfg['exec_args']
+ if isinstance(cmd_args, (list, tuple)):
+ cmd.extend(cmd_args)
+ elif isinstance(cmd_args, str):
+ cmd.extend(cmd_args.split())
+ else:
+ log.warning("Unknown type %s provided for puppet"
+ " 'exec_args' expected list, tuple,"
+ " or string", type(cmd_args))
+ cmd.extend(PUPPET_AGENT_DEFAULT_ARGS)
+ else:
+ cmd.extend(PUPPET_AGENT_DEFAULT_ARGS)
+ subp.subp(cmd, capture=False)
+
# Start puppetd
subp.subp(['service', 'puppet', 'start'], capture=False)
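
A reduced sketch of the new exec handling above: how a user-supplied exec_args value becomes the final agent command line. The aio binary path is taken from the hunk; the module's separate missing-key and bad-type branches are collapsed into one fallback here:

    PUPPET_AGENT_DEFAULT_ARGS = ['--test']
    puppet_bin = '/opt/puppetlabs/bin/puppet'  # aio install location

    def build_agent_cmd(puppet_cfg):
        cmd = [puppet_bin, 'agent']
        cmd_args = puppet_cfg.get('exec_args')
        if isinstance(cmd_args, (list, tuple)):
            cmd.extend(cmd_args)
        elif isinstance(cmd_args, str):
            cmd.extend(cmd_args.split())
        else:
            cmd.extend(PUPPET_AGENT_DEFAULT_ARGS)
        return cmd

    print(build_agent_cmd({'exec_args': '--onetime --no-daemonize'}))
    # ['/opt/puppetlabs/bin/puppet', 'agent', '--onetime', '--no-daemonize']
    print(build_agent_cmd({}))
    # ['/opt/puppetlabs/bin/puppet', 'agent', '--test']
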
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index c51967e2..648935e4 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -108,18 +108,19 @@ def handle(name, cfg, cloud, log, _args):
if "resolv_conf" not in cfg:
log.warning("manage_resolv_conf True but no parameters provided!")
+ return
try:
template_fn = cloud.get_template_filename(
- RESOLVE_CONFIG_TEMPLATE_MAP[cloud.distro.resolv_conf_fn])
+ RESOLVE_CONFIG_TEMPLATE_MAP[cloud.distro.resolve_conf_fn])
except KeyError:
- log.warning("No template found, not rendering /etc/resolv.conf")
+ log.warning("No template found, not rendering resolve configs")
return
generate_resolv_conf(
template_fn=template_fn,
params=cfg["resolv_conf"],
- target_fname=cloud.disro.resolve_conf_fn
+ target_fname=cloud.distro.resolve_conf_fn
)
return
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 67f09686..b7a48dcc 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -18,7 +18,8 @@ entry, the config entry will be skipped.
**Module frequency:** per always
-**Supported distros:** almalinux, centos, fedora, photon, rhel, rocky
+**Supported distros:** almalinux, centos, eurolinux, fedora, photon, rhel,
+ rocky, virtuozzo
**Config keys**::
@@ -36,7 +37,8 @@ from configparser import ConfigParser
from cloudinit import util
-distros = ['almalinux', 'centos', 'fedora', 'photon', 'rhel', 'rocky']
+distros = ['almalinux', 'centos', 'eurolinux', 'fedora', 'photon', 'rhel',
+ 'rocky', 'virtuozzo']
def _canonicalize_id(repo_id):
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 7bdf2197..a634623a 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -49,8 +49,8 @@ OSFAMILIES = {
'debian': ['debian', 'ubuntu'],
'freebsd': ['freebsd'],
'gentoo': ['gentoo'],
- 'redhat': ['almalinux', 'amazon', 'centos', 'fedora', 'photon', 'rhel',
- 'rocky'],
+ 'redhat': ['almalinux', 'amazon', 'centos', 'eurolinux', 'fedora',
+ 'photon', 'rhel', 'rocky', 'virtuozzo'],
'suse': ['opensuse', 'sles'],
}
@@ -147,7 +147,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
return uses_systemd()
@abc.abstractmethod
- def package_command(self, cmd, args=None, pkgs=None):
+ def package_command(self, command, args=None, pkgs=None):
raise NotImplementedError()
@abc.abstractmethod
diff --git a/cloudinit/distros/alpine.py b/cloudinit/distros/alpine.py
index e4bed5a2..73b68baf 100644
--- a/cloudinit/distros/alpine.py
+++ b/cloudinit/distros/alpine.py
@@ -73,18 +73,18 @@ class Distro(distros.Distro):
self.update_package_sources()
self.package_command('add', pkgs=pkglist)
- def _write_hostname(self, your_hostname, out_fn):
+ def _write_hostname(self, hostname, filename):
conf = None
try:
# Try to update the previous one
# so lets see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
+ conf = self._read_hostname_conf(filename)
except IOError:
pass
if not conf:
conf = HostnameConf('')
- conf.set_hostname(your_hostname)
- util.write_file(out_fn, str(conf), 0o644)
+ conf.set_hostname(hostname)
+ util.write_file(filename, str(conf), 0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index c9acb11f..3c5bbb38 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -101,18 +101,18 @@ class Distro(distros.Distro):
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
- def _write_hostname(self, your_hostname, out_fn):
+ def _write_hostname(self, hostname, filename):
conf = None
try:
# Try to update the previous one
# so lets see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
+ conf = self._read_hostname_conf(filename)
except IOError:
pass
if not conf:
conf = HostnameConf('')
- conf.set_hostname(your_hostname)
- util.write_file(out_fn, str(conf), omode="w", mode=0o644)
+ conf.set_hostname(hostname)
+ util.write_file(filename, str(conf), omode="w", mode=0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 089e0c3e..f2b4dfc9 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -115,18 +115,18 @@ class Distro(distros.Distro):
_maybe_remove_legacy_eth0()
return super()._write_network_state(network_state)
- def _write_hostname(self, your_hostname, out_fn):
+ def _write_hostname(self, hostname, filename):
conf = None
try:
# Try to update the previous one
# so lets see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
+ conf = self._read_hostname_conf(filename)
except IOError:
pass
if not conf:
conf = HostnameConf('')
- conf.set_hostname(your_hostname)
- util.write_file(out_fn, str(conf), 0o644)
+ conf.set_hostname(hostname)
+ util.write_file(filename, str(conf), 0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
diff --git a/cloudinit/distros/eurolinux.py b/cloudinit/distros/eurolinux.py
new file mode 100644
index 00000000..edb3165d
--- /dev/null
+++ b/cloudinit/distros/eurolinux.py
@@ -0,0 +1,9 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import rhel
+
+
+class Distro(rhel.Distro):
+ pass
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index 68c03e7f..1be76dc8 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -149,12 +149,12 @@ class Distro(distros.Distro):
else:
return distros.Distro._bring_up_interfaces(self, device_names)
- def _write_hostname(self, your_hostname, out_fn):
+ def _write_hostname(self, hostname, filename):
conf = None
try:
# Try to update the previous one
# so lets see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
+ conf = self._read_hostname_conf(filename)
except IOError:
pass
if not conf:
@@ -163,8 +163,8 @@ class Distro(distros.Distro):
# Many distro's format is the hostname by itself, and that is the
# way HostnameConf works but gentoo expects it to be in
# hostname="the-actual-hostname"
- conf.set_hostname('hostname="%s"' % your_hostname)
- util.write_file(out_fn, str(conf), 0o644)
+ conf.set_hostname('hostname="%s"' % hostname)
+ util.write_file(filename, str(conf), 0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index b4193ac2..2a7497cc 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -150,9 +150,9 @@ class Distro(distros.Distro):
host_fn = self.hostname_conf_fn
return (host_fn, self._read_hostname(host_fn))
- def _write_hostname(self, hostname, out_fn):
- if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
- util.write_file(out_fn, hostname)
+ def _write_hostname(self, hostname, filename):
+ if self.uses_systemd() and filename.endswith('/previous-hostname'):
+ util.write_file(filename, hostname)
elif self.uses_systemd():
subp.subp(['hostnamectl', 'set-hostname', str(hostname)])
else:
@@ -160,13 +160,13 @@ class Distro(distros.Distro):
try:
# Try to update the previous one
# so lets see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
+ conf = self._read_hostname_conf(filename)
except IOError:
pass
if not conf:
conf = HostnameConf('')
conf.set_hostname(hostname)
- util.write_file(out_fn, str(conf), 0o644)
+ util.write_file(filename, str(conf), 0o644)
@property
def preferred_ntp_clients(self):
diff --git a/cloudinit/distros/photon.py b/cloudinit/distros/photon.py
index 0ced7b5f..4ff90ea6 100644
--- a/cloudinit/distros/photon.py
+++ b/cloudinit/distros/photon.py
@@ -5,6 +5,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+from cloudinit import net
from cloudinit import util
from cloudinit import subp
from cloudinit import distros
@@ -12,13 +13,12 @@ from cloudinit import helpers
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
from cloudinit.distros import rhel_util as rhutil
-from cloudinit.distros.parsers.hostname import HostnameConf
LOG = logging.getLogger(__name__)
class Distro(distros.Distro):
- hostname_conf_fn = '/etc/hostname'
+ systemd_hostname_conf_fn = '/etc/hostname'
network_conf_dir = '/etc/systemd/network/'
systemd_locale_conf_fn = '/etc/locale.conf'
resolve_conf_fn = '/etc/systemd/resolved.conf'
@@ -42,17 +42,32 @@ class Distro(distros.Distro):
self.osfamily = 'photon'
self.init_cmd = ['systemctl']
- def exec_cmd(self, cmd, capture=False):
+ def exec_cmd(self, cmd, capture=True):
LOG.debug('Attempting to run: %s', cmd)
try:
(out, err) = subp.subp(cmd, capture=capture)
if err:
LOG.warning('Running %s resulted in stderr output: %s',
cmd, err)
- return True, out, err
+ return True, out, err
+ return False, out, err
except subp.ProcessExecutionError:
util.logexc(LOG, 'Command %s failed', cmd)
- return False, None, None
+ return True, None, None
+
+ def generate_fallback_config(self):
+ key = 'disable_fallback_netcfg'
+ disable_fallback_netcfg = self._cfg.get(key, True)
+ LOG.debug('%s value is: %s', key, disable_fallback_netcfg)
+
+ if not disable_fallback_netcfg:
+ return net.generate_fallback_config()
+
+ LOG.info(
+ 'Skipping generate_fallback_config. Rely on PhotonOS default '
+ 'network config'
+ )
+ return None
def apply_locale(self, locale, out_fn=None):
# This has a dependancy on glibc-i18n, user need to manually install it
@@ -70,41 +85,32 @@ class Distro(distros.Distro):
# For locale change to take effect, reboot is needed or we can restart
# systemd-localed. This is equivalent of localectl
cmd = ['systemctl', 'restart', 'systemd-localed']
- _ret, _out, _err = self.exec_cmd(cmd)
+ self.exec_cmd(cmd)
def install_packages(self, pkglist):
# self.update_package_sources()
self.package_command('install', pkgs=pkglist)
- def _bring_up_interfaces(self, device_names):
- cmd = ['systemctl', 'restart', 'systemd-networkd', 'systemd-resolved']
- LOG.debug('Attempting to run bring up interfaces using command %s',
- cmd)
- ret, _out, _err = self.exec_cmd(cmd)
- return ret
-
- def _write_hostname(self, hostname, out_fn):
- conf = None
- try:
- # Try to update the previous one
- # Let's see if we can read it first.
- conf = HostnameConf(util.load_file(out_fn))
- conf.parse()
- except IOError:
- pass
- if not conf:
- conf = HostnameConf('')
- conf.set_hostname(hostname)
- util.write_file(out_fn, str(conf), mode=0o644)
+ def _write_hostname(self, hostname, filename):
+ if filename and filename.endswith('/previous-hostname'):
+ util.write_file(filename, hostname)
+ else:
+ ret, _out, err = self.exec_cmd(['hostnamectl', 'set-hostname',
+ str(hostname)])
+ if ret:
+                LOG.warning('Error while setting hostname: %s\n'
+                            'Given hostname: %s', err, hostname)
def _read_system_hostname(self):
- sys_hostname = self._read_hostname(self.hostname_conf_fn)
- return (self.hostname_conf_fn, sys_hostname)
+ sys_hostname = self._read_hostname(self.systemd_hostname_conf_fn)
+ return (self.systemd_hostname_conf_fn, sys_hostname)
def _read_hostname(self, filename, default=None):
- _ret, out, _err = self.exec_cmd(['hostname'])
+ if filename and filename.endswith('/previous-hostname'):
+ return util.load_file(filename).strip()
- return out if out else default
+ _ret, out, _err = self.exec_cmd(['hostname', '-f'])
+ return out.strip() if out else default
def _get_localhost_ip(self):
return '127.0.1.1'
@@ -113,7 +119,7 @@ class Distro(distros.Distro):
distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
+ if not pkgs:
pkgs = []
cmd = ['tdnf', '-y']
@@ -127,8 +133,9 @@ class Distro(distros.Distro):
pkglist = util.expand_package_list('%s-%s', pkgs)
cmd.extend(pkglist)
- # Allow the output of this to flow outwards (ie not be captured)
- _ret, _out, _err = self.exec_cmd(cmd, capture=False)
+ ret, _out, err = self.exec_cmd(cmd)
+ if ret:
+ LOG.error('Error while installing packages: %s', err)
def update_package_sources(self):
self._runner.run('update-sources', self.package_command,
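
The photon hunks above invert exec_cmd()'s first return value: True now signals failure, so call sites read 'ret, out, err' and treat ret as an error flag. A reduced sketch of that contract, with subprocess standing in for cloudinit.subp:

    import subprocess

    def exec_cmd(cmd):
        # Return (failed, out, err): True in the first slot now means error.
        try:
            proc = subprocess.run(cmd, capture_output=True, text=True,
                                  check=True)
            return False, proc.stdout, proc.stderr
        except (OSError, subprocess.CalledProcessError) as e:
            return True, None, str(e)

    failed, out, _err = exec_cmd(['hostname', '-f'])
    print('unknown' if failed or not out else out.strip())
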
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index be5b3d24..c9ee2747 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -78,18 +78,18 @@ class Distro(distros.Distro):
}
rhel_util.update_sysconfig_file(out_fn, locale_cfg)
- def _write_hostname(self, hostname, out_fn):
+ def _write_hostname(self, hostname, filename):
# systemd will never update previous-hostname for us, so
# we need to do it ourselves
- if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
- util.write_file(out_fn, hostname)
+ if self.uses_systemd() and filename.endswith('/previous-hostname'):
+ util.write_file(filename, hostname)
elif self.uses_systemd():
subp.subp(['hostnamectl', 'set-hostname', str(hostname)])
else:
host_cfg = {
'HOSTNAME': hostname,
}
- rhel_util.update_sysconfig_file(out_fn, host_cfg)
+ rhel_util.update_sysconfig_file(filename, host_cfg)
def _read_system_hostname(self):
if self.uses_systemd():
diff --git a/cloudinit/distros/virtuozzo.py b/cloudinit/distros/virtuozzo.py
new file mode 100644
index 00000000..edb3165d
--- /dev/null
+++ b/cloudinit/distros/virtuozzo.py
@@ -0,0 +1,9 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import rhel
+
+
+class Distro(rhel.Distro):
+ pass
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index b827d41a..017c50c5 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -313,11 +313,11 @@ def is_netfail_standby(devname, driver=None):
def is_renamed(devname):
"""
/* interface name assignment types (sysfs name_assign_type attribute) */
- #define NET_NAME_UNKNOWN 0 /* unknown origin (not exposed to user) */
- #define NET_NAME_ENUM 1 /* enumerated by kernel */
- #define NET_NAME_PREDICTABLE 2 /* predictably named by the kernel */
- #define NET_NAME_USER 3 /* provided by user-space */
- #define NET_NAME_RENAMED 4 /* renamed by user-space */
+ #define NET_NAME_UNKNOWN 0 /* unknown origin (not exposed to user) */
+ #define NET_NAME_ENUM 1 /* enumerated by kernel */
+ #define NET_NAME_PREDICTABLE 2 /* predictably named by the kernel */
+ #define NET_NAME_USER 3 /* provided by user-space */
+ #define NET_NAME_RENAMED 4 /* renamed by user-space */
"""
name_assign_type = read_sys_net_safe(devname, 'name_assign_type')
if name_assign_type and name_assign_type in ['3', '4']:
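
A sketch of the sysfs check documented above, reading name_assign_type directly instead of through read_sys_net_safe:

    def is_renamed(devname):
        path = '/sys/class/net/%s/name_assign_type' % devname
        try:
            with open(path) as f:
                name_assign_type = f.read().strip()
        except OSError:
            return False
        # 3 = provided by user-space, 4 = renamed by user-space
        return name_assign_type in ('3', '4')

    print(is_renamed('eth0'))  # usually False for a kernel-named NIC
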
@@ -661,6 +661,8 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
cur['name'] = name
cur_info[name] = cur
+ LOG.debug("Detected interfaces %s", cur_info)
+
def update_byname(bymac):
return dict((data['name'], data)
for data in cur_info.values())
diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py
index 84aaafc9..11149548 100644
--- a/cloudinit/net/activators.py
+++ b/cloudinit/net/activators.py
@@ -8,6 +8,7 @@ from cloudinit import subp
from cloudinit import util
from cloudinit.net.eni import available as eni_available
from cloudinit.net.netplan import available as netplan_available
+from cloudinit.net.networkd import available as networkd_available
from cloudinit.net.network_state import NetworkState
from cloudinit.net.sysconfig import NM_CFG_FILE
@@ -213,12 +214,38 @@ class NetplanActivator(NetworkActivator):
return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+class NetworkdActivator(NetworkActivator):
+ @staticmethod
+ def available(target=None) -> bool:
+ """Return true if ifupdown can be used on this system."""
+ return networkd_available(target=target)
+
+ @staticmethod
+ def bring_up_interface(device_name: str) -> bool:
+ """ Return True is successful, otherwise return False """
+ cmd = ['ip', 'link', 'set', 'up', device_name]
+ return _alter_interface(cmd, device_name)
+
+ @staticmethod
+ def bring_up_all_interfaces(network_state: NetworkState) -> bool:
+ """ Return True is successful, otherwise return False """
+ cmd = ['systemctl', 'restart', 'systemd-networkd', 'systemd-resolved']
+ return _alter_interface(cmd, 'all')
+
+ @staticmethod
+ def bring_down_interface(device_name: str) -> bool:
+ """ Return True is successful, otherwise return False """
+ cmd = ['ip', 'link', 'set', 'down', device_name]
+ return _alter_interface(cmd, device_name)
+
+
# This section is mostly copied and pasted from renderers.py. An abstract
# version to encompass both seems overkill at this point
DEFAULT_PRIORITY = [
IfUpDownActivator,
NetworkManagerActivator,
NetplanActivator,
+ NetworkdActivator,
]
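
DEFAULT_PRIORITY is scanned in order and the first activator whose available() returns True is used. An illustrative selection loop (select_activator here is a stand-in, not necessarily the module's real helper):

    class FakeActivator:
        @staticmethod
        def available(target=None):
            return True

    DEFAULT_PRIORITY = [FakeActivator]

    def select_activator(priority=None, target=None):
        for activator in (priority or DEFAULT_PRIORITY):
            if activator.available(target):
                return activator
        raise RuntimeError('No available network activators found')

    print(select_activator().__name__)  # FakeActivator
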
diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py
index 2dffce59..a311572f 100644
--- a/cloudinit/net/networkd.py
+++ b/cloudinit/net/networkd.py
@@ -72,8 +72,8 @@ class Renderer(renderer.Renderer):
def __init__(self, config=None):
if not config:
config = {}
- self.resolved_conf = config.get('resolved_conf_fn',
- '/etc/systemd/resolved.conf')
+ self.resolve_conf_fn = config.get('resolve_conf_fn',
+ '/etc/systemd/resolved.conf')
self.network_conf_dir = config.get('network_conf_dir',
'/etc/systemd/network/')
@@ -246,7 +246,7 @@ class Renderer(renderer.Renderer):
def available(target=None):
- expected = ['systemctl']
+ expected = ['ip', 'systemctl']
search = ['/usr/bin', '/bin']
for p in expected:
if not subp.which(p, search=search, target=target):
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 8031cd3a..06f7255e 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -18,7 +18,8 @@ from .network_state import (
is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6, IPV6_DYNAMIC_TYPES)
LOG = logging.getLogger(__name__)
-KNOWN_DISTROS = ['almalinux', 'centos', 'fedora', 'rhel', 'rocky', 'suse']
+KNOWN_DISTROS = ['almalinux', 'centos', 'eurolinux', 'fedora', 'rhel', 'rocky',
+ 'suse', 'virtuozzo']
NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf"
diff --git a/cloudinit/patcher.py b/cloudinit/patcher.py
index 2df9441a..186d8ad8 100644
--- a/cloudinit/patcher.py
+++ b/cloudinit/patcher.py
@@ -6,7 +6,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import imp
import logging
import sys
@@ -20,7 +19,7 @@ class QuietStreamHandler(logging.StreamHandler):
pass
-def _patch_logging():
+def patch_logging():
# Replace 'handleError' with one that will be more
# tolerant of errors in that it can avoid
# re-notifying on exceptions and when errors
@@ -37,12 +36,4 @@ def _patch_logging():
pass
setattr(logging.Handler, 'handleError', handleError)
-
-def patch():
- imp.acquire_lock()
- try:
- _patch_logging()
- finally:
- imp.release_lock()
-
# vi: ts=4 expandtab
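
The removed patch() wrapper only guarded patch_logging() with the deprecated imp module's import lock, so callers now invoke patch_logging() directly. What the patch itself does, in miniature: replace logging.Handler.handleError with a version that never raises or re-notifies:

    import logging

    def quiet_handle_error(self, record):
        # Deliberately do nothing: a failure while emitting a log record
        # should not cascade into further errors.
        pass

    setattr(logging.Handler, 'handleError', quiet_handle_error)

    handler = logging.StreamHandler()
    handler.handleError(None)  # now a harmless no-op
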
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 23e4c0ad..f69005ea 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -43,6 +43,7 @@ CFG_BUILTIN = {
'Exoscale',
'RbxCloud',
'UpCloud',
+ 'VMware',
# At the end to act as a 'catch' when none of the above work...
'None',
],
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index dcdf9f8f..ba23139b 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -45,7 +45,8 @@ from cloudinit.sources.helpers.azure import (
is_byte_swapped,
dhcp_log_cb,
push_log_to_kvp,
- report_failure_to_fabric)
+ report_failure_to_fabric,
+ build_minimal_ovf)
LOG = logging.getLogger(__name__)
@@ -76,7 +77,7 @@ REPROVISION_NIC_ATTACH_MARKER_FILE = "/var/lib/cloud/data/wait_for_nic_attach"
REPROVISION_NIC_DETACHED_MARKER_FILE = "/var/lib/cloud/data/nic_detached"
REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
AGENT_SEED_DIR = '/var/lib/waagent'
-
+DEFAULT_PROVISIONING_ISO_DEV = '/dev/sr0'
# In the event where the IMDS primary server is not
# available, it takes 1s to fallback to the secondary one
@@ -428,148 +429,154 @@ class DataSourceAzure(sources.DataSource):
# it determines the value of ret. More specifically, the first one in
# the candidate list determines the path to take in order to get the
# metadata we need.
- candidates = [self.seed_dir]
+ reprovision = False
+ ovf_is_accessible = True
+ reprovision_after_nic_attach = False
+ metadata_source = None
+ ret = None
if os.path.isfile(REPROVISION_MARKER_FILE):
- candidates.insert(0, "IMDS")
+ reprovision = True
+ metadata_source = "IMDS"
report_diagnostic_event("Reprovision marker file already present "
"before crawling Azure metadata: %s" %
REPROVISION_MARKER_FILE,
logger_func=LOG.debug)
elif os.path.isfile(REPROVISION_NIC_ATTACH_MARKER_FILE):
- candidates.insert(0, "NIC_ATTACH_MARKER_PRESENT")
+ reprovision_after_nic_attach = True
+ metadata_source = "NIC_ATTACH_MARKER_PRESENT"
report_diagnostic_event("Reprovision nic attach marker file "
"already present before crawling Azure "
"metadata: %s" %
REPROVISION_NIC_ATTACH_MARKER_FILE,
logger_func=LOG.debug)
- candidates.extend(list_possible_azure_ds_devs())
- if ddir:
- candidates.append(ddir)
-
- found = None
- reprovision = False
- ovf_is_accessible = True
- reprovision_after_nic_attach = False
- for cdev in candidates:
- try:
- LOG.debug("cdev: %s", cdev)
- if cdev == "IMDS":
- ret = None
- reprovision = True
- elif cdev == "NIC_ATTACH_MARKER_PRESENT":
- ret = None
- reprovision_after_nic_attach = True
- elif cdev.startswith("/dev/"):
- if util.is_FreeBSD():
- ret = util.mount_cb(cdev, load_azure_ds_dir,
- mtype="udf")
+ else:
+ for src in list_possible_azure_ds(self.seed_dir, ddir):
+ try:
+ if src.startswith("/dev/"):
+ if util.is_FreeBSD():
+ ret = util.mount_cb(src, load_azure_ds_dir,
+ mtype="udf")
+ else:
+ ret = util.mount_cb(src, load_azure_ds_dir)
+ # save the device for ejection later
+ self.iso_dev = src
+ ovf_is_accessible = True
else:
- ret = util.mount_cb(cdev, load_azure_ds_dir)
- else:
- ret = load_azure_ds_dir(cdev)
-
- except NonAzureDataSource:
- report_diagnostic_event(
- "Did not find Azure data source in %s" % cdev,
- logger_func=LOG.debug)
- continue
- except BrokenAzureDataSource as exc:
- msg = 'BrokenAzureDataSource: %s' % exc
- report_diagnostic_event(msg, logger_func=LOG.error)
- raise sources.InvalidMetaDataException(msg)
- except util.MountFailedError:
- report_diagnostic_event(
- '%s was not mountable' % cdev, logger_func=LOG.debug)
- cdev = 'IMDS'
- ovf_is_accessible = False
- empty_md = {'local-hostname': ''}
- empty_cfg = dict(
- system_info=dict(
- default_user=dict(
- name=''
+ ret = load_azure_ds_dir(src)
+ metadata_source = src
+ break
+ except NonAzureDataSource:
+ report_diagnostic_event(
+ "Did not find Azure data source in %s" % src,
+ logger_func=LOG.debug)
+ continue
+ except util.MountFailedError:
+ report_diagnostic_event(
+ '%s was not mountable' % src,
+ logger_func=LOG.debug)
+ ovf_is_accessible = False
+ empty_md = {'local-hostname': ''}
+ empty_cfg = dict(
+ system_info=dict(
+ default_user=dict(
+ name=''
+ )
)
)
- )
- ret = (empty_md, '', empty_cfg, {})
-
- report_diagnostic_event("Found provisioning metadata in %s" % cdev,
- logger_func=LOG.debug)
+ ret = (empty_md, '', empty_cfg, {})
+ metadata_source = 'IMDS'
+ continue
+ except BrokenAzureDataSource as exc:
+ msg = 'BrokenAzureDataSource: %s' % exc
+ report_diagnostic_event(msg, logger_func=LOG.error)
+ raise sources.InvalidMetaDataException(msg)
- # save the iso device for ejection before reporting ready
- if cdev.startswith("/dev"):
- self.iso_dev = cdev
+ report_diagnostic_event(
+ "Found provisioning metadata in %s" % metadata_source,
+ logger_func=LOG.debug)
- perform_reprovision = reprovision or self._should_reprovision(ret)
- perform_reprovision_after_nic_attach = (
- reprovision_after_nic_attach or
- self._should_reprovision_after_nic_attach(ret))
+ perform_reprovision = reprovision or self._should_reprovision(ret)
+ perform_reprovision_after_nic_attach = (
+ reprovision_after_nic_attach or
+ self._should_reprovision_after_nic_attach(ret))
- if perform_reprovision or perform_reprovision_after_nic_attach:
- if util.is_FreeBSD():
- msg = "Free BSD is not supported for PPS VMs"
- report_diagnostic_event(msg, logger_func=LOG.error)
- raise sources.InvalidMetaDataException(msg)
- if perform_reprovision_after_nic_attach:
- self._wait_for_all_nics_ready()
- ret = self._reprovision()
+ if perform_reprovision or perform_reprovision_after_nic_attach:
+ if util.is_FreeBSD():
+ msg = "Free BSD is not supported for PPS VMs"
+ report_diagnostic_event(msg, logger_func=LOG.error)
+ raise sources.InvalidMetaDataException(msg)
+ if perform_reprovision_after_nic_attach:
+ self._wait_for_all_nics_ready()
+ ret = self._reprovision()
- imds_md = self.get_imds_data_with_api_fallback(
- self.fallback_interface,
- retries=10
+ imds_md = self.get_imds_data_with_api_fallback(
+ self.fallback_interface,
+ retries=10
+ )
+ if not imds_md and not ovf_is_accessible:
+ msg = 'No OVF or IMDS available'
+ report_diagnostic_event(msg)
+ raise sources.InvalidMetaDataException(msg)
+ (md, userdata_raw, cfg, files) = ret
+ self.seed = metadata_source
+ crawled_data.update({
+ 'cfg': cfg,
+ 'files': files,
+ 'metadata': util.mergemanydict(
+ [md, {'imds': imds_md}]),
+ 'userdata_raw': userdata_raw})
+ imds_username = _username_from_imds(imds_md)
+ imds_hostname = _hostname_from_imds(imds_md)
+ imds_disable_password = _disable_password_from_imds(imds_md)
+ if imds_username:
+ LOG.debug('Username retrieved from IMDS: %s', imds_username)
+ cfg['system_info']['default_user']['name'] = imds_username
+ if imds_hostname:
+ LOG.debug('Hostname retrieved from IMDS: %s', imds_hostname)
+ crawled_data['metadata']['local-hostname'] = imds_hostname
+ if imds_disable_password:
+ LOG.debug(
+ 'Disable password retrieved from IMDS: %s',
+ imds_disable_password
)
- if not imds_md and not ovf_is_accessible:
- msg = 'No OVF or IMDS available'
- report_diagnostic_event(msg)
- raise sources.InvalidMetaDataException(msg)
- (md, userdata_raw, cfg, files) = ret
- self.seed = cdev
- crawled_data.update({
- 'cfg': cfg,
- 'files': files,
- 'metadata': util.mergemanydict(
- [md, {'imds': imds_md}]),
- 'userdata_raw': userdata_raw})
- imds_username = _username_from_imds(imds_md)
- imds_hostname = _hostname_from_imds(imds_md)
- imds_disable_password = _disable_password_from_imds(imds_md)
- if imds_username:
- LOG.debug('Username retrieved from IMDS: %s', imds_username)
- cfg['system_info']['default_user']['name'] = imds_username
- if imds_hostname:
- LOG.debug('Hostname retrieved from IMDS: %s', imds_hostname)
- crawled_data['metadata']['local-hostname'] = imds_hostname
- if imds_disable_password:
- LOG.debug(
- 'Disable password retrieved from IMDS: %s',
- imds_disable_password
- )
- crawled_data['metadata']['disable_password'] = imds_disable_password # noqa: E501
-
- # only use userdata from imds if OVF did not provide custom data
- # userdata provided by IMDS is always base64 encoded
- if not userdata_raw:
- imds_userdata = _userdata_from_imds(imds_md)
- if imds_userdata:
- LOG.debug("Retrieved userdata from IMDS")
- try:
- crawled_data['userdata_raw'] = base64.b64decode(
- ''.join(imds_userdata.split()))
- except Exception:
- report_diagnostic_event(
- "Bad userdata in IMDS",
- logger_func=LOG.warning)
- found = cdev
+ crawled_data['metadata']['disable_password'] = imds_disable_password # noqa: E501
- report_diagnostic_event(
- 'found datasource in %s' % cdev, logger_func=LOG.debug)
- break
+ if metadata_source == 'IMDS' and not crawled_data['files']:
+ try:
+ contents = build_minimal_ovf(
+ username=imds_username,
+ hostname=imds_hostname,
+ disableSshPwd=imds_disable_password)
+ crawled_data['files'] = {'ovf-env.xml': contents}
+ except Exception as e:
+ report_diagnostic_event(
+ "Failed to construct OVF from IMDS data %s" % e,
+ logger_func=LOG.debug)
+
+ # only use userdata from imds if OVF did not provide custom data
+ # userdata provided by IMDS is always base64 encoded
+ if not userdata_raw:
+ imds_userdata = _userdata_from_imds(imds_md)
+ if imds_userdata:
+ LOG.debug("Retrieved userdata from IMDS")
+ try:
+ crawled_data['userdata_raw'] = base64.b64decode(
+ ''.join(imds_userdata.split()))
+ except Exception:
+ report_diagnostic_event(
+ "Bad userdata in IMDS",
+ logger_func=LOG.warning)
- if not found:
+ if not metadata_source:
msg = 'No Azure metadata found'
report_diagnostic_event(msg, logger_func=LOG.error)
raise sources.InvalidMetaDataException(msg)
+ else:
+ report_diagnostic_event(
+ 'found datasource in %s' % metadata_source,
+ logger_func=LOG.debug)
- if found == ddir:
+ if metadata_source == ddir:
report_diagnostic_event(
"using files cached in %s" % ddir, logger_func=LOG.debug)
@@ -916,6 +923,16 @@ class DataSourceAzure(sources.DataSource):
sleep(sleep_duration)
+    # Since we just did an unbind and bind, check again after sleep
+ # but before doing unbind and bind again to avoid races where the
+ # link might take a slight delay after bind to be up.
+ if self.distro.networking.is_up(ifname):
+ msg = ("Link is up after checking after sleeping for %d secs"
+ " after %d attempts" %
+ (sleep_duration, attempts))
+ report_diagnostic_event(msg, logger_func=LOG.info)
+ return
+
@azure_ds_telemetry_reporter
def _create_report_ready_marker(self):
path = REPORTED_READY_MARKER_FILE
@@ -965,7 +982,7 @@ class DataSourceAzure(sources.DataSource):
imds_md = None
metadata_poll_count = 0
metadata_logging_threshold = 1
- metadata_timeout_count = 0
+ expected_errors_count = 0
# For now, only a VM's primary NIC can contact IMDS and WireServer. If
# DHCP fails for a NIC, we have no mechanism to determine if the NIC is
@@ -991,13 +1008,16 @@ class DataSourceAzure(sources.DataSource):
raise
# Retry polling network metadata for a limited duration only when the
- # calls fail due to timeout. This is because the platform drops packets
- # going towards IMDS when it is not a primary nic. If the calls fail
- # due to other issues like 410, 503 etc, then it means we are primary
- # but IMDS service is unavailable at the moment. Retry indefinitely in
- # those cases since we cannot move on without the network metadata.
+        # calls fail due to a network-unreachable error or a timeout.
+        # This is because the platform drops packets going towards IMDS
+        # when it is not a primary nic. If the calls fail due to other issues
+        # like 410, 503 etc, then it means we are primary but the IMDS
+        # service is unavailable at the moment. Retry indefinitely in those
+        # cases, since we cannot move on without the network metadata. In the
+        # future none of this will be necessary, as a new DHCP option will
+        # indicate whether the nic is primary or not.
def network_metadata_exc_cb(msg, exc):
- nonlocal metadata_timeout_count, metadata_poll_count
+ nonlocal expected_errors_count, metadata_poll_count
nonlocal metadata_logging_threshold
metadata_poll_count = metadata_poll_count + 1
@@ -1017,9 +1037,13 @@ class DataSourceAzure(sources.DataSource):
(msg, exc.cause, exc.code),
logger_func=LOG.error)
- if exc.cause and isinstance(exc.cause, requests.Timeout):
- metadata_timeout_count = metadata_timeout_count + 1
- return (metadata_timeout_count <= 10)
+ # Retry up to a certain limit for both timeout and network
+ # unreachable errors.
+ if exc.cause and isinstance(
+ exc.cause, (requests.Timeout, requests.ConnectionError)
+ ):
+ expected_errors_count = expected_errors_count + 1
+ return (expected_errors_count <= 10)
return True
# Primary nic detection will be optimized in the future. The fact that
@@ -2084,18 +2108,18 @@ def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE):
@azure_ds_telemetry_reporter
-def list_possible_azure_ds_devs():
- devlist = []
+def list_possible_azure_ds(seed, cache_dir):
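+    """Yield candidate devices/directories that may contain Azure metadata,
+    in the order they should be checked."""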
+ yield seed
+ yield DEFAULT_PROVISIONING_ISO_DEV
if util.is_FreeBSD():
cdrom_dev = "/dev/cd0"
if _check_freebsd_cdrom(cdrom_dev):
- return [cdrom_dev]
+ yield cdrom_dev
else:
for fstype in ("iso9660", "udf"):
- devlist.extend(util.find_devs_with("TYPE=%s" % fstype))
-
- devlist.sort(reverse=True)
- return devlist
+ yield from util.find_devs_with("TYPE=%s" % fstype)
+ if cache_dir:
+ yield cache_dir
@azure_ds_telemetry_reporter
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 9e83dccc..e909f058 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -358,8 +358,11 @@ class DataSourceOVF(sources.DataSource):
if contents:
break
if contents:
- (md, ud, cfg) = read_ovf_environment(contents)
+ read_network = ('com.vmware.guestinfo' == name)
+ (md, ud, cfg) = read_ovf_environment(contents, read_network)
self.environment = contents
+ if 'network-config' in md and md['network-config']:
+ self._network_config = md['network-config']
found.append(name)
# There was no OVF transports found
@@ -507,13 +510,14 @@ def read_vmware_imc(config):
# This will return a dict with some content
# meta-data, user-data, some config
-def read_ovf_environment(contents):
+def read_ovf_environment(contents, read_network=False):
props = get_properties(contents)
md = {}
cfg = {}
ud = None
cfg_props = ['password']
md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
+ network_props = ['network-config']
for (prop, val) in props.items():
if prop == 'hostname':
prop = "local-hostname"
@@ -521,6 +525,12 @@ def read_ovf_environment(contents):
md[prop] = val
elif prop in cfg_props:
cfg[prop] = val
+ elif prop in network_props and read_network:
+ try:
+ network_config = base64.b64decode(val.encode())
+ md[prop] = safeload_yaml_or_dict(network_config).get('network')
+ except Exception:
+                LOG.debug("Ignoring network-config in unexpected format")
elif prop == "user-data":
try:
ud = base64.b64decode(val.encode())
diff --git a/cloudinit/sources/DataSourceVMware.py b/cloudinit/sources/DataSourceVMware.py
new file mode 100644
index 00000000..22ca63de
--- /dev/null
+++ b/cloudinit/sources/DataSourceVMware.py
@@ -0,0 +1,871 @@
+# Cloud-Init DataSource for VMware
+#
+# Copyright (c) 2018-2021 VMware, Inc. All Rights Reserved.
+#
+# Authors: Anish Swaminathan <anishs@vmware.com>
+# Andrew Kutz <akutz@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Cloud-Init DataSource for VMware
+
+This module provides a cloud-init datasource for VMware systems and supports
+multiple transport types, including:
+
+ * EnvVars
+ * GuestInfo
+
+Netifaces (https://github.com/al45tair/netifaces)
+
+    Please note this module relies on the netifaces project to introspect
+    the runtime network configuration of the host on which this datasource
+    is running. This is in contrast to the rest of cloud-init, which uses
+    the cloudinit/netinfo module.
+
+ The reasons for using netifaces include:
+
+      * Netifaces is built in C, making it more portable across systems
+        and more deterministic than shelling out to local network commands
+        and parsing their output.
+
+ * Netifaces provides a stable way to determine the view of the host's
+ network after DHCP has brought the network online. Unlike most other
+ datasources, this datasource still provides support for JINJA queries
+ based on networking information even when the network is based on a
+ DHCP lease. While this does not tie this datasource directly to
+ netifaces, it does mean the ability to consistently obtain the
+ correct information is paramount.
+
+ * It is currently possible to execute this datasource on macOS
+ (which many developers use today) to print the output of the
+ get_host_info function. This function calls netifaces to obtain
+ the same runtime network configuration that the datasource would
+ persist to the local system's instance data.
+
+ However, the netinfo module fails on macOS. The result is either a
+ hung operation that requires a SIGINT to return control to the user,
+ or, if brew is used to install iproute2mac, the ip commands are used
+ but produce output the netinfo module is unable to parse.
+
+ While macOS is not a target of cloud-init, this feature is quite
+ useful when working on this datasource.
+
+ For more information about this behavior, please see the following
+ PR comment, https://bit.ly/3fG7OVh.
+
+ The authors of this datasource are not opposed to moving away from
+ netifaces. The goal may be to eventually do just that. This proviso was
+ added to the top of this module as a way to remind future-us and others
+ why netifaces was used in the first place in order to either smooth the
+ transition away from netifaces or embrace it further up the cloud-init
+ stack.
+"""
+
+import collections
+import copy
+from distutils.spawn import find_executable
+import ipaddress
+import json
+import os
+import socket
+import time
+
+from cloudinit import dmi, log as logging
+from cloudinit import sources
+from cloudinit import util
+from cloudinit.subp import subp, ProcessExecutionError
+
+import netifaces
+
+
+PRODUCT_UUID_FILE_PATH = "/sys/class/dmi/id/product_uuid"
+
+LOG = logging.getLogger(__name__)
+NOVAL = "No value found"
+
+DATA_ACCESS_METHOD_ENVVAR = "envvar"
+DATA_ACCESS_METHOD_GUESTINFO = "guestinfo"
+
+VMWARE_RPCTOOL = find_executable("vmware-rpctool")
+REDACT = "redact"
+CLEANUP_GUESTINFO = "cleanup-guestinfo"
+VMX_GUESTINFO = "VMX_GUESTINFO"
+GUESTINFO_EMPTY_YAML_VAL = "---"
+
+LOCAL_IPV4 = "local-ipv4"
+LOCAL_IPV6 = "local-ipv6"
+WAIT_ON_NETWORK = "wait-on-network"
+WAIT_ON_NETWORK_IPV4 = "ipv4"
+WAIT_ON_NETWORK_IPV6 = "ipv6"
+
+
+class DataSourceVMware(sources.DataSource):
+ """
+ Setting the hostname:
+ The hostname is set by way of the metadata key "local-hostname".
+
+ Setting the instance ID:
+ The instance ID may be set by way of the metadata key "instance-id".
+ However, if this value is absent then the instance ID is read
+ from the file /sys/class/dmi/id/product_uuid.
+
+ Configuring the network:
+ The network is configured by setting the metadata key "network"
+ with a value consistent with Network Config Versions 1 or 2,
+ depending on the Linux distro's version of cloud-init:
+
+ Network Config Version 1 - http://bit.ly/cloudinit-net-conf-v1
+ Network Config Version 2 - http://bit.ly/cloudinit-net-conf-v2
+
+ For example, CentOS 7's official cloud-init package is version
+ 0.7.9 and does not support Network Config Version 2. However,
+ this datasource still supports supplying Network Config Version 2
+ data as long as the Linux distro's cloud-init package is new
+ enough to parse the data.
+
+ The metadata key "network.encoding" may be used to indicate the
+ format of the metadata key "network". Valid encodings are base64
+ and gzip+base64.
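+
+        For example (illustrative values only; actual keys and values
+        depend on the deployment), metadata delivered via guestinfo might
+        look like:
+
+            instance-id: "my-instance-1"
+            local-hostname: "my-instance-1"
+            network: "<base64-encoded network config document>"
+            network.encoding: "base64"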
+ """
+
+ dsname = "VMware"
+
+ def __init__(self, sys_cfg, distro, paths, ud_proc=None):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
+
+ self.data_access_method = None
+ self.vmware_rpctool = VMWARE_RPCTOOL
+
+ def _get_data(self):
+ """
+ _get_data loads the metadata, userdata, and vendordata from one of
+ the following locations in the given order:
+
+ * envvars
+ * guestinfo
+
+ Please note when updating this function with support for new data
+ transports, the order should match the order in the dscheck_VMware
+ function from the file ds-identify.
+ """
+
+ # Initialize the locally scoped metadata, userdata, and vendordata
+ # variables. They are assigned below depending on the detected data
+ # access method.
+ md, ud, vd = None, None, None
+
+ # First check to see if there is data via env vars.
+ if os.environ.get(VMX_GUESTINFO, ""):
+ md = guestinfo_envvar("metadata")
+ ud = guestinfo_envvar("userdata")
+ vd = guestinfo_envvar("vendordata")
+
+ if md or ud or vd:
+ self.data_access_method = DATA_ACCESS_METHOD_ENVVAR
+
+ # At this point, all additional data transports are valid only on
+ # a VMware platform.
+ if not self.data_access_method:
+ system_type = dmi.read_dmi_data("system-product-name")
+ if system_type is None:
+ LOG.debug("No system-product-name found")
+ return False
+ if "vmware" not in system_type.lower():
+ LOG.debug("Not a VMware platform")
+ return False
+
+ # If no data was detected, check the guestinfo transport next.
+ if not self.data_access_method:
+ if self.vmware_rpctool:
+ md = guestinfo("metadata", self.vmware_rpctool)
+ ud = guestinfo("userdata", self.vmware_rpctool)
+ vd = guestinfo("vendordata", self.vmware_rpctool)
+
+ if md or ud or vd:
+ self.data_access_method = DATA_ACCESS_METHOD_GUESTINFO
+
+ if not self.data_access_method:
+ LOG.error("failed to find a valid data access method")
+ return False
+
+ LOG.info("using data access method %s", self._get_subplatform())
+
+ # Get the metadata.
+ self.metadata = process_metadata(load_json_or_yaml(md))
+
+ # Get the user data.
+ self.userdata_raw = ud
+
+ # Get the vendor data.
+ self.vendordata_raw = vd
+
+ # Redact any sensitive information.
+ self.redact_keys()
+
+ # get_data returns true if there is any available metadata,
+ # userdata, or vendordata.
+ if self.metadata or self.userdata_raw or self.vendordata_raw:
+ return True
+ else:
+ return False
+
+ def setup(self, is_new_instance):
+ """setup(is_new_instance)
+
+ This is called before user-data and vendor-data have been processed.
+
+ Unless the datasource has set mode to 'local', then networking
+        Unless the datasource has set its mode to 'local', networking
+        per 'fallback' or per 'network_config' will have been written and
+        brought up by the OS at this point.
+
+ host_info = wait_on_network(self.metadata)
+ LOG.info("got host-info: %s", host_info)
+
+ # Reflect any possible local IPv4 or IPv6 addresses in the guest
+ # info.
+ advertise_local_ip_addrs(host_info)
+
+ # Ensure the metadata gets updated with information about the
+ # host, including the network interfaces, default IP addresses,
+ # etc.
+ self.metadata = util.mergemanydict([self.metadata, host_info])
+
+ # Persist the instance data for versions of cloud-init that support
+ # doing so. This occurs here rather than in the get_data call in
+ # order to ensure that the network interfaces are up and can be
+ # persisted with the metadata.
+ self.persist_instance_data()
+
+ def _get_subplatform(self):
+ get_key_name_fn = None
+ if self.data_access_method == DATA_ACCESS_METHOD_ENVVAR:
+ get_key_name_fn = get_guestinfo_envvar_key_name
+ elif self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO:
+ get_key_name_fn = get_guestinfo_key_name
+ else:
+ return sources.METADATA_UNKNOWN
+
+ return "%s (%s)" % (
+ self.data_access_method,
+ get_key_name_fn("metadata"),
+ )
+
+ @property
+ def network_config(self):
+ if "network" in self.metadata:
+ LOG.debug("using metadata network config")
+ else:
+ LOG.debug("using fallback network config")
+ self.metadata["network"] = {
+ "config": self.distro.generate_fallback_config(),
+ }
+ return self.metadata["network"]["config"]
+
+ def get_instance_id(self):
+ # Pull the instance ID out of the metadata if present. Otherwise
+ # read the file /sys/class/dmi/id/product_uuid for the instance ID.
+ if self.metadata and "instance-id" in self.metadata:
+ return self.metadata["instance-id"]
+ with open(PRODUCT_UUID_FILE_PATH, "r") as id_file:
+ self.metadata["instance-id"] = str(id_file.read()).rstrip().lower()
+ return self.metadata["instance-id"]
+
+ def get_public_ssh_keys(self):
+ for key_name in (
+ "public-keys-data",
+ "public_keys_data",
+ "public-keys",
+ "public_keys",
+ ):
+ if key_name in self.metadata:
+ return sources.normalize_pubkey_data(self.metadata[key_name])
+ return []
+
+ def redact_keys(self):
+ # Determine if there are any keys to redact.
+ keys_to_redact = None
+ if REDACT in self.metadata:
+ keys_to_redact = self.metadata[REDACT]
+ elif CLEANUP_GUESTINFO in self.metadata:
+ # This is for backwards compatibility.
+ keys_to_redact = self.metadata[CLEANUP_GUESTINFO]
+
+ if self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO:
+ guestinfo_redact_keys(keys_to_redact, self.vmware_rpctool)
+
+
+def decode(key, enc_type, data):
+ """
+ decode returns the decoded string value of data
+ key is a string used to identify the data being decoded in log messages
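+    enc_type selects the decoding applied to data: "gzip+base64"/"gz+b64",
+    "base64"/"b64", or anything else for plain text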
+ """
+ LOG.debug("Getting encoded data for key=%s, enc=%s", key, enc_type)
+
+ raw_data = None
+ if enc_type in ["gzip+base64", "gz+b64"]:
+ LOG.debug("Decoding %s format %s", enc_type, key)
+ raw_data = util.decomp_gzip(util.b64d(data))
+ elif enc_type in ["base64", "b64"]:
+ LOG.debug("Decoding %s format %s", enc_type, key)
+ raw_data = util.b64d(data)
+ else:
+ LOG.debug("Plain-text data %s", key)
+ raw_data = data
+
+ return util.decode_binary(raw_data)
+
+
+def get_none_if_empty_val(val):
+ """
+ get_none_if_empty_val returns None if the provided value, once stripped
+ of its trailing whitespace, is empty or equal to GUESTINFO_EMPTY_YAML_VAL.
+
+    When a value is returned, it is always a string, regardless of whether
+    the input was a bytes object or a string.
+ """
+
+ # If the provided value is a bytes class, convert it to a string to
+ # simplify the rest of this function's logic.
+ val = util.decode_binary(val)
+ val = val.rstrip()
+ if len(val) == 0 or val == GUESTINFO_EMPTY_YAML_VAL:
+ return None
+ return val
+
+
+def advertise_local_ip_addrs(host_info):
+ """
+ advertise_local_ip_addrs gets the local IP address information from
+ the provided host_info map and sets the addresses in the guestinfo
+ namespace
+ """
+ if not host_info:
+ return
+
+ # Reflect any possible local IPv4 or IPv6 addresses in the guest
+ # info.
+ local_ipv4 = host_info.get(LOCAL_IPV4)
+ if local_ipv4:
+ guestinfo_set_value(LOCAL_IPV4, local_ipv4)
+ LOG.info("advertised local ipv4 address %s in guestinfo", local_ipv4)
+
+ local_ipv6 = host_info.get(LOCAL_IPV6)
+ if local_ipv6:
+ guestinfo_set_value(LOCAL_IPV6, local_ipv6)
+ LOG.info("advertised local ipv6 address %s in guestinfo", local_ipv6)
+
+
+def handle_returned_guestinfo_val(key, val):
+ """
+ handle_returned_guestinfo_val returns the provided value if it is
+ not empty or set to GUESTINFO_EMPTY_YAML_VAL, otherwise None is
+ returned
+ """
+ val = get_none_if_empty_val(val)
+ if val:
+ return val
+ LOG.debug("No value found for key %s", key)
+ return None
+
+
+def get_guestinfo_key_name(key):
+ return "guestinfo." + key
+
+
+def get_guestinfo_envvar_key_name(key):
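+    # e.g. "metadata" -> "VMX_GUESTINFO_METADATA"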
+ return ("vmx." + get_guestinfo_key_name(key)).upper().replace(".", "_", -1)
+
+
+def guestinfo_envvar(key):
+ val = guestinfo_envvar_get_value(key)
+ if not val:
+ return None
+ enc_type = guestinfo_envvar_get_value(key + ".encoding")
+ return decode(get_guestinfo_envvar_key_name(key), enc_type, val)
+
+
+def guestinfo_envvar_get_value(key):
+ env_key = get_guestinfo_envvar_key_name(key)
+ return handle_returned_guestinfo_val(key, os.environ.get(env_key, ""))
+
+
+def guestinfo(key, vmware_rpctool=VMWARE_RPCTOOL):
+ """
+ guestinfo returns the guestinfo value for the provided key, decoding
+ the value when required
+ """
+ val = guestinfo_get_value(key, vmware_rpctool)
+ if not val:
+ return None
+ enc_type = guestinfo_get_value(key + ".encoding", vmware_rpctool)
+ return decode(get_guestinfo_key_name(key), enc_type, val)
+
+
+def guestinfo_get_value(key, vmware_rpctool=VMWARE_RPCTOOL):
+ """
+ Returns a guestinfo value for the specified key.
+ """
+ LOG.debug("Getting guestinfo value for key %s", key)
+
+ try:
+ (stdout, stderr) = subp(
+ [
+ vmware_rpctool,
+ "info-get " + get_guestinfo_key_name(key),
+ ]
+ )
+ if stderr == NOVAL:
+ LOG.debug("No value found for key %s", key)
+ elif not stdout:
+ LOG.error("Failed to get guestinfo value for key %s", key)
+ return handle_returned_guestinfo_val(key, stdout)
+ except ProcessExecutionError as error:
+ if error.stderr == NOVAL:
+ LOG.debug("No value found for key %s", key)
+ else:
+ util.logexc(
+ LOG,
+ "Failed to get guestinfo value for key %s: %s",
+ key,
+ error,
+ )
+ except Exception:
+ util.logexc(
+ LOG,
+ "Unexpected error while trying to get "
+ + "guestinfo value for key %s",
+ key,
+ )
+
+ return None
+
+
+def guestinfo_set_value(key, value, vmware_rpctool=VMWARE_RPCTOOL):
+ """
+ Sets a guestinfo value for the specified key. Set value to an empty string
+ to clear an existing guestinfo key.
+ """
+
+ # If value is an empty string then set it to a single space as it is not
+ # possible to set a guestinfo key to an empty string. Setting a guestinfo
+ # key to a single space is as close as it gets to clearing an existing
+ # guestinfo key.
+ if value == "":
+ value = " "
+
+ LOG.debug("Setting guestinfo key=%s to value=%s", key, value)
+
+ try:
+ subp(
+ [
+ vmware_rpctool,
+ ("info-set %s %s" % (get_guestinfo_key_name(key), value)),
+ ]
+ )
+ return True
+ except ProcessExecutionError as error:
+ util.logexc(
+ LOG,
+ "Failed to set guestinfo key=%s to value=%s: %s",
+ key,
+ value,
+ error,
+ )
+ except Exception:
+ util.logexc(
+ LOG,
+ "Unexpected error while trying to set "
+ + "guestinfo key=%s to value=%s",
+ key,
+ value,
+ )
+
+ return None
+
+
+def guestinfo_redact_keys(keys, vmware_rpctool=VMWARE_RPCTOOL):
+ """
+    guestinfo_redact_keys redacts the guestinfo values for all of the keys
+    in the given list. Each key has its value set to "---". Since that is
+    valid YAML, cloud-init can still read it if it tries.
+ """
+ if not keys:
+ return
+    if not isinstance(keys, (list, tuple)):
+ keys = [keys]
+ for key in keys:
+ key_name = get_guestinfo_key_name(key)
+ LOG.info("clearing %s", key_name)
+ if not guestinfo_set_value(
+ key, GUESTINFO_EMPTY_YAML_VAL, vmware_rpctool
+ ):
+ LOG.error("failed to clear %s", key_name)
+ LOG.info("clearing %s.encoding", key_name)
+ if not guestinfo_set_value(key + ".encoding", "", vmware_rpctool):
+ LOG.error("failed to clear %s.encoding", key_name)
+
+
+def load_json_or_yaml(data):
+ """
+    load_json_or_yaml first attempts to unmarshal the provided data as
+    JSON, and if that fails then attempts to unmarshal the data as YAML.
+    If data is empty, a new dictionary is returned.
+ """
+ if not data:
+ return {}
+ try:
+ return util.load_json(data)
+ except (json.JSONDecodeError, TypeError):
+ return util.load_yaml(data)
+
+
+def process_metadata(data):
+ """
+ process_metadata processes metadata and loads the optional network
+ configuration.
+ """
+ network = None
+ if "network" in data:
+ network = data["network"]
+ del data["network"]
+
+ network_enc = None
+ if "network.encoding" in data:
+ network_enc = data["network.encoding"]
+ del data["network.encoding"]
+
+ if network:
+ if isinstance(network, collections.abc.Mapping):
+ LOG.debug("network data copied to 'config' key")
+ network = {"config": copy.deepcopy(network)}
+ else:
+ LOG.debug("network data to be decoded %s", network)
+ dec_net = decode("metadata.network", network_enc, network)
+ network = {
+ "config": load_json_or_yaml(dec_net),
+ }
+
+ LOG.debug("network data %s", network)
+ data["network"] = network
+
+ return data
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceVMware, (sources.DEP_FILESYSTEM,)), # Run at init-local
+ (DataSourceVMware, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+def get_datasource_list(depends):
+ """
+ Return a list of data sources that match this set of dependencies
+ """
+ return sources.list_from_depends(depends, datasources)
+
+
+def get_default_ip_addrs():
+ """
+ Returns the default IPv4 and IPv6 addresses based on the device(s) used for
+ the default route. Please note that None may be returned for either address
+ family if that family has no default route or if there are multiple
+    addresses associated with the device used by the default route for a
+    given address family.
+ """
+ # TODO(promote and use netifaces in cloudinit.net* modules)
+ gateways = netifaces.gateways()
+ if "default" not in gateways:
+ return None, None
+
+ default_gw = gateways["default"]
+ if (
+ netifaces.AF_INET not in default_gw
+ and netifaces.AF_INET6 not in default_gw
+ ):
+ return None, None
+
+ ipv4 = None
+ ipv6 = None
+
+ gw4 = default_gw.get(netifaces.AF_INET)
+ if gw4:
+ _, dev4 = gw4
+ addr4_fams = netifaces.ifaddresses(dev4)
+ if addr4_fams:
+ af_inet4 = addr4_fams.get(netifaces.AF_INET)
+ if af_inet4:
+ if len(af_inet4) > 1:
+ LOG.warning(
+ "device %s has more than one ipv4 address: %s",
+ dev4,
+ af_inet4,
+ )
+ elif "addr" in af_inet4[0]:
+ ipv4 = af_inet4[0]["addr"]
+
+ # Try to get the default IPv6 address by first seeing if there is a default
+ # IPv6 route.
+ gw6 = default_gw.get(netifaces.AF_INET6)
+ if gw6:
+ _, dev6 = gw6
+ addr6_fams = netifaces.ifaddresses(dev6)
+ if addr6_fams:
+ af_inet6 = addr6_fams.get(netifaces.AF_INET6)
+ if af_inet6:
+ if len(af_inet6) > 1:
+ LOG.warning(
+ "device %s has more than one ipv6 address: %s",
+ dev6,
+ af_inet6,
+ )
+ elif "addr" in af_inet6[0]:
+ ipv6 = af_inet6[0]["addr"]
+
+    # If there is a default IPv4 address but not IPv6, then see if there is
+    # a single IPv6 address on the same device that carries the default
+    # IPv4 address.
+ if ipv4 and not ipv6:
+ af_inet6 = addr4_fams.get(netifaces.AF_INET6)
+ if af_inet6:
+ if len(af_inet6) > 1:
+ LOG.warning(
+ "device %s has more than one ipv6 address: %s",
+ dev4,
+ af_inet6,
+ )
+ elif "addr" in af_inet6[0]:
+ ipv6 = af_inet6[0]["addr"]
+
+    # If there is a default IPv6 address but not IPv4, then see if there is
+    # a single IPv4 address on the same device that carries the default
+    # IPv6 address.
+ if not ipv4 and ipv6:
+ af_inet4 = addr6_fams.get(netifaces.AF_INET)
+ if af_inet4:
+ if len(af_inet4) > 1:
+ LOG.warning(
+ "device %s has more than one ipv4 address: %s",
+ dev6,
+ af_inet4,
+ )
+ elif "addr" in af_inet4[0]:
+ ipv4 = af_inet4[0]["addr"]
+
+ return ipv4, ipv6
+
+
+# patched socket.getfqdn() - see https://bugs.python.org/issue5004
+
+
+def getfqdn(name=""):
+ """Get fully qualified domain name from name.
+ An empty argument is interpreted as meaning the local host.
+ """
+ # TODO(may want to promote this function to util.getfqdn)
+ # TODO(may want to extend util.get_hostname to accept fqdn=True param)
+ name = name.strip()
+ if not name or name == "0.0.0.0":
+ name = util.get_hostname()
+ try:
+ addrs = socket.getaddrinfo(
+ name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME
+ )
+ except socket.error:
+ pass
+ else:
+ for addr in addrs:
+ if addr[3]:
+ name = addr[3]
+ break
+ return name
+
+
+def is_valid_ip_addr(val):
+ """
+    Returns False if the address is loopback, link-local or unspecified;
+    otherwise True is returned. Returns None if the value cannot be parsed
+    as an IP address at all.
+ """
+ # TODO(extend cloudinit.net.is_ip_addr exclude link_local/loopback etc)
+    # TODO(migrate to use cloudinit.net.is_ip_addr)
+
+ addr = None
+ try:
+ addr = ipaddress.ip_address(val)
+ except ipaddress.AddressValueError:
+ addr = ipaddress.ip_address(str(val))
+ except Exception:
+ return None
+
+ if addr.is_link_local or addr.is_loopback or addr.is_unspecified:
+ return False
+ return True
+
+
+def get_host_info():
+ """
+ Returns host information such as the host name and network interfaces.
+ """
+    # TODO(look to promote netifaces use up in cloud-init netinfo funcs)
+ host_info = {
+ "network": {
+ "interfaces": {
+ "by-mac": collections.OrderedDict(),
+ "by-ipv4": collections.OrderedDict(),
+ "by-ipv6": collections.OrderedDict(),
+ },
+ },
+ }
+ hostname = getfqdn(util.get_hostname())
+ if hostname:
+ host_info["hostname"] = hostname
+ host_info["local-hostname"] = hostname
+ host_info["local_hostname"] = hostname
+
+ default_ipv4, default_ipv6 = get_default_ip_addrs()
+ if default_ipv4:
+ host_info[LOCAL_IPV4] = default_ipv4
+ if default_ipv6:
+ host_info[LOCAL_IPV6] = default_ipv6
+
+ by_mac = host_info["network"]["interfaces"]["by-mac"]
+ by_ipv4 = host_info["network"]["interfaces"]["by-ipv4"]
+ by_ipv6 = host_info["network"]["interfaces"]["by-ipv6"]
+
+ ifaces = netifaces.interfaces()
+ for dev_name in ifaces:
+ addr_fams = netifaces.ifaddresses(dev_name)
+ af_link = addr_fams.get(netifaces.AF_LINK)
+ af_inet4 = addr_fams.get(netifaces.AF_INET)
+ af_inet6 = addr_fams.get(netifaces.AF_INET6)
+
+ mac = None
+ if af_link and "addr" in af_link[0]:
+ mac = af_link[0]["addr"]
+
+ # Do not bother recording localhost
+ if mac == "00:00:00:00:00:00":
+ continue
+
+ if mac and (af_inet4 or af_inet6):
+ key = mac
+ val = {}
+ if af_inet4:
+ af_inet4_vals = []
+ for ip_info in af_inet4:
+ if not is_valid_ip_addr(ip_info["addr"]):
+ continue
+ af_inet4_vals.append(ip_info)
+ val["ipv4"] = af_inet4_vals
+ if af_inet6:
+ af_inet6_vals = []
+ for ip_info in af_inet6:
+ if not is_valid_ip_addr(ip_info["addr"]):
+ continue
+ af_inet6_vals.append(ip_info)
+ val["ipv6"] = af_inet6_vals
+ by_mac[key] = val
+
+ if af_inet4:
+ for ip_info in af_inet4:
+ key = ip_info["addr"]
+ if not is_valid_ip_addr(key):
+ continue
+ val = copy.deepcopy(ip_info)
+ del val["addr"]
+ if mac:
+ val["mac"] = mac
+ by_ipv4[key] = val
+
+ if af_inet6:
+ for ip_info in af_inet6:
+ key = ip_info["addr"]
+ if not is_valid_ip_addr(key):
+ continue
+ val = copy.deepcopy(ip_info)
+ del val["addr"]
+ if mac:
+ val["mac"] = mac
+ by_ipv6[key] = val
+
+ return host_info
+
+
+def wait_on_network(metadata):
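+    # The metadata may request waiting on either address family, e.g.:
+    #
+    #   wait-on-network:
+    #     ipv4: true
+    #     ipv6: false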
+ # Determine whether we need to wait on the network coming online.
+ wait_on_ipv4 = False
+ wait_on_ipv6 = False
+ if WAIT_ON_NETWORK in metadata:
+ wait_on_network = metadata[WAIT_ON_NETWORK]
+ if WAIT_ON_NETWORK_IPV4 in wait_on_network:
+ wait_on_ipv4_val = wait_on_network[WAIT_ON_NETWORK_IPV4]
+ if isinstance(wait_on_ipv4_val, bool):
+ wait_on_ipv4 = wait_on_ipv4_val
+ else:
+ wait_on_ipv4 = util.translate_bool(wait_on_ipv4_val)
+ if WAIT_ON_NETWORK_IPV6 in wait_on_network:
+ wait_on_ipv6_val = wait_on_network[WAIT_ON_NETWORK_IPV6]
+ if isinstance(wait_on_ipv6_val, bool):
+ wait_on_ipv6 = wait_on_ipv6_val
+ else:
+ wait_on_ipv6 = util.translate_bool(wait_on_ipv6_val)
+
+ # Get information about the host.
+ host_info = None
+ while host_info is None:
+ # This loop + sleep results in two logs every second while waiting
+ # for either ipv4 or ipv6 up. Do we really need to log each iteration
+ # or can we log once and log on successful exit?
+ host_info = get_host_info()
+
+ network = host_info.get("network") or {}
+ interfaces = network.get("interfaces") or {}
+ by_ipv4 = interfaces.get("by-ipv4") or {}
+ by_ipv6 = interfaces.get("by-ipv6") or {}
+
+        # Compute readiness for both families up front so the debug log
+        # below never references an unbound name when only one family is
+        # being waited on.
+        ipv4_ready = len(by_ipv4) > 0
+        ipv6_ready = len(by_ipv6) > 0
+
+        if wait_on_ipv4 and not ipv4_ready:
+            host_info = None
+
+        if wait_on_ipv6 and not ipv6_ready:
+            host_info = None
+
+ if host_info is None:
+ LOG.debug(
+ "waiting on network: wait4=%s, ready4=%s, wait6=%s, ready6=%s",
+ wait_on_ipv4,
+ ipv4_ready,
+ wait_on_ipv6,
+ ipv6_ready,
+ )
+ time.sleep(1)
+
+ LOG.debug("waiting on network complete")
+ return host_info
+
+
+def main():
+ """
+ Executed when this file is used as a program.
+ """
+ try:
+ logging.setupBasicLogging()
+ except Exception:
+ pass
+ metadata = {
+ "wait-on-network": {"ipv4": True, "ipv6": "false"},
+ "network": {"config": {"dhcp": True}},
+ }
+ host_info = wait_on_network(metadata)
+ metadata = util.mergemanydict([metadata, host_info])
+ print(util.json_dumps(metadata))
+
+
+if __name__ == "__main__":
+ main()
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index bf6bf139..cc7e1c3c 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -679,6 +679,16 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
def get_package_mirror_info(self):
return self.distro.get_package_mirror_info(data_source=self)
+ def get_supported_events(self, source_event_types: List[EventType]):
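+        """Return a mapping of event scope to the subset of the requested
+        source_event_types that the datasource supports in that scope."""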
+ supported_events = {} # type: Dict[EventScope, set]
+ for event in source_event_types:
+ for update_scope, update_events in self.supported_update_events.items(): # noqa: E501
+ if event in update_events:
+ if not supported_events.get(update_scope):
+ supported_events[update_scope] = set()
+ supported_events[update_scope].add(event)
+ return supported_events
+
def update_metadata_if_supported(
self, source_event_types: List[EventType]
) -> bool:
@@ -694,13 +704,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
@return True if the datasource did successfully update cached metadata
due to source_event_type.
"""
- supported_events = {} # type: Dict[EventScope, set]
- for event in source_event_types:
- for update_scope, update_events in self.supported_update_events.items(): # noqa: E501
- if event in update_events:
- if not supported_events.get(update_scope):
- supported_events[update_scope] = set()
- supported_events[update_scope].add(event)
+ supported_events = self.get_supported_events(source_event_types)
for scope, matched_events in supported_events.items():
LOG.debug(
"Update datasource metadata and %s config due to events: %s",
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index ad476076..a5ac1d57 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -344,6 +344,40 @@ def http_with_retries(url, **kwargs) -> str:
raise exc
+def build_minimal_ovf(
+ username: str,
+ hostname: str,
+ disableSshPwd: str) -> bytes:
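+    """Return a minimal OVF environment document (as bytes) built from
+    IMDS-provided values, for use when no OVF was found on a device."""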
+ OVF_ENV_TEMPLATE = textwrap.dedent('''\
+ <ns0:Environment xmlns:ns0="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:ns1="http://schemas.microsoft.com/windowsazure"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <ns1:ProvisioningSection>
+ <ns1:Version>1.0</ns1:Version>
+ <ns1:LinuxProvisioningConfigurationSet>
+ <ns1:ConfigurationSetType>LinuxProvisioningConfiguration
+ </ns1:ConfigurationSetType>
+ <ns1:UserName>{username}</ns1:UserName>
+ <ns1:DisableSshPasswordAuthentication>{disableSshPwd}
+ </ns1:DisableSshPasswordAuthentication>
+ <ns1:HostName>{hostname}</ns1:HostName>
+ </ns1:LinuxProvisioningConfigurationSet>
+ </ns1:ProvisioningSection>
+ <ns1:PlatformSettingsSection>
+ <ns1:Version>1.0</ns1:Version>
+ <ns1:PlatformSettings>
+ <ns1:ProvisionGuestAgent>true</ns1:ProvisionGuestAgent>
+ </ns1:PlatformSettings>
+ </ns1:PlatformSettingsSection>
+ </ns0:Environment>
+ ''')
+ ret = OVF_ENV_TEMPLATE.format(
+ username=username,
+ hostname=hostname,
+ disableSshPwd=disableSshPwd)
+ return ret.encode('utf-8')
+
+
class AzureEndpointHttpClient:
headers = {
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 89057262..b8a3c8f7 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -249,6 +249,113 @@ def render_authorizedkeysfile_paths(value, homedir, username):
return rendered
+# Inspired by safe_path() in the openssh source code (misc.c).
+def check_permissions(username, current_path, full_path, is_file, strictmodes):
+ """Check if the file/folder in @current_path has the right permissions.
+
+ We need to check that:
+ 1. If StrictMode is enabled, the owner is either root or the user
+    2. The user can access the file/folder, otherwise ssh won't use it
+ 3. If StrictMode is enabled, no write permission is given to group
+ and world users (022)
+ """
+
+ # group/world can only execute the folder (access)
+ minimal_permissions = 0o711
+ if is_file:
+ # group/world can only read the file
+ minimal_permissions = 0o644
+
+ # 1. owner must be either root or the user itself
+ owner = util.get_owner(current_path)
+ if strictmodes and owner != username and owner != "root":
+        LOG.debug("Path %s in %s must be owned by user %s or"
+                  " by root, but instead is owned by %s. Ignoring key.",
+                  current_path, full_path, username, owner)
+ return False
+
+ parent_permission = util.get_permissions(current_path)
+ # 2. the user can access the file/folder, otherwise ssh won't use it
+ if owner == username:
+ # need only the owner permissions
+ minimal_permissions &= 0o700
+ else:
+ group_owner = util.get_group(current_path)
+ user_groups = util.get_user_groups(username)
+
+ if group_owner in user_groups:
+ # need only the group permissions
+ minimal_permissions &= 0o070
+ else:
+ # need only the world permissions
+ minimal_permissions &= 0o007
+
+ if parent_permission & minimal_permissions == 0:
+ LOG.debug("Path %s in %s must be accessible by user %s,"
+ " check its permissions",
+ current_path, full_path, username)
+ return False
+
+ # 3. no write permission (w) is given to group and world users (022)
+    # Group and world users can still have +rx.
+ if strictmodes and parent_permission & 0o022 != 0:
+        LOG.debug("Path %s in %s must not give write"
+                  " permission to group or world users. Ignoring key.",
+                  current_path, full_path)
+ return False
+
+ return True
+
+
+def check_create_path(username, filename, strictmodes):
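+    """Create the authorized_keys file path, including any missing parent
+    directories, and verify ownership and permissions along the way.
+
+    Returns True when the resulting path is safe to use, False otherwise.
+    """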
+ user_pwent = users_ssh_info(username)[1]
+ root_pwent = users_ssh_info("root")[1]
+ try:
+ # check the directories first
+ directories = filename.split("/")[1:-1]
+
+ # scan in order, from root to file name
+ parent_folder = ""
+        # this also copes with unit tests and
+        # unusual home directory layouts
+ home_folder = os.path.dirname(user_pwent.pw_dir)
+ for directory in directories:
+ parent_folder += "/" + directory
+ if home_folder.startswith(parent_folder):
+ continue
+
+ if not os.path.isdir(parent_folder):
+                # directory does not exist, and permissions so far are good:
+ # create the directory, and make it accessible by everyone
+ # but owned by root, as it might be used by many users.
+ with util.SeLinuxGuard(parent_folder):
+ os.makedirs(parent_folder, mode=0o755, exist_ok=True)
+ util.chownbyid(parent_folder, root_pwent.pw_uid,
+ root_pwent.pw_gid)
+
+ permissions = check_permissions(username, parent_folder,
+ filename, False, strictmodes)
+ if not permissions:
+ return False
+
+ # check the file
+ if not os.path.exists(filename):
+ # if file does not exist: we need to create it, since the
+            # folders at this point exist and have the right permissions
+ util.write_file(filename, '', mode=0o600, ensure_dir_exists=True)
+ util.chownbyid(filename, user_pwent.pw_uid, user_pwent.pw_gid)
+
+ permissions = check_permissions(username, filename,
+ filename, True, strictmodes)
+ if not permissions:
+ return False
+ except (IOError, OSError) as e:
+ util.logexc(LOG, str(e))
+ return False
+
+ return True
+
+
def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG):
(ssh_dir, pw_ent) = users_ssh_info(username)
default_authorizedkeys_file = os.path.join(ssh_dir, 'authorized_keys')
@@ -259,6 +366,7 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG):
ssh_cfg = parse_ssh_config_map(sshd_cfg_file)
key_paths = ssh_cfg.get("authorizedkeysfile",
"%h/.ssh/authorized_keys")
+ strictmodes = ssh_cfg.get("strictmodes", "yes")
auth_key_fns = render_authorizedkeysfile_paths(
key_paths, pw_ent.pw_dir, username)
@@ -269,31 +377,31 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG):
"config from %r, using 'AuthorizedKeysFile' file "
"%r instead", DEF_SSHD_CFG, auth_key_fns[0])
- # check if one of the keys is the user's one
+ # check if one of the keys is the user's one and has the right permissions
for key_path, auth_key_fn in zip(key_paths.split(), auth_key_fns):
if any([
'%u' in key_path,
'%h' in key_path,
auth_key_fn.startswith('{}/'.format(pw_ent.pw_dir))
]):
- user_authorizedkeys_file = auth_key_fn
+ permissions_ok = check_create_path(username, auth_key_fn,
+ strictmodes == "yes")
+ if permissions_ok:
+ user_authorizedkeys_file = auth_key_fn
+ break
if user_authorizedkeys_file != default_authorizedkeys_file:
LOG.debug(
"AuthorizedKeysFile has an user-specific authorized_keys, "
"using %s", user_authorizedkeys_file)
- # always store all the keys in the user's private file
- return (user_authorizedkeys_file, parse_authorized_keys(auth_key_fns))
+ return (
+ user_authorizedkeys_file,
+ parse_authorized_keys([user_authorizedkeys_file])
+ )
def setup_user_keys(keys, username, options=None):
- # Make sure the users .ssh dir is setup accordingly
- (ssh_dir, pwent) = users_ssh_info(username)
- if not os.path.isdir(ssh_dir):
- util.ensure_dir(ssh_dir, mode=0o700)
- util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
-
# Turn the 'update' keys given into actual entries
parser = AuthKeyLineParser()
key_entries = []
@@ -302,11 +410,10 @@ def setup_user_keys(keys, username, options=None):
# Extract the old and make the new
(auth_key_fn, auth_key_entries) = extract_authorized_keys(username)
+ ssh_dir = os.path.dirname(auth_key_fn)
with util.SeLinuxGuard(ssh_dir, recursive=True):
content = update_authorized_keys(auth_key_entries, key_entries)
- util.ensure_dir(os.path.dirname(auth_key_fn), mode=0o700)
- util.write_file(auth_key_fn, content, mode=0o600)
- util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid)
+ util.write_file(auth_key_fn, content, preserve_mode=True)
class SshdConfigLine(object):
diff --git a/cloudinit/tests/.test_util.py.swp b/cloudinit/tests/.test_util.py.swp
deleted file mode 100644
index 78ef5865..00000000
--- a/cloudinit/tests/.test_util.py.swp
+++ /dev/null
Binary files differ
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index a1ccb1dc..9dd01158 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -124,6 +124,38 @@ OS_RELEASE_ALMALINUX_8 = dedent("""\
ALMALINUX_MANTISBT_PROJECT_VERSION="8.3"
""")
+OS_RELEASE_EUROLINUX_7 = dedent("""\
+ VERSION="7.9 (Minsk)"
+ ID="eurolinux"
+ ID_LIKE="rhel scientific centos fedora"
+ VERSION_ID="7.9"
+ PRETTY_NAME="EuroLinux 7.9 (Minsk)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:eurolinux:eurolinux:7.9:GA"
+ HOME_URL="http://www.euro-linux.com/"
+ BUG_REPORT_URL="mailto:support@euro-linux.com"
+ REDHAT_BUGZILLA_PRODUCT="EuroLinux 7"
+ REDHAT_BUGZILLA_PRODUCT_VERSION=7.9
+ REDHAT_SUPPORT_PRODUCT="EuroLinux"
+ REDHAT_SUPPORT_PRODUCT_VERSION="7.9"
+""")
+
+OS_RELEASE_EUROLINUX_8 = dedent("""\
+ NAME="EuroLinux"
+ VERSION="8.4 (Vaduz)"
+ ID="eurolinux"
+ ID_LIKE="rhel fedora centos"
+ VERSION_ID="8.4"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="EuroLinux 8.4 (Vaduz)"
+ ANSI_COLOR="0;34"
+ CPE_NAME="cpe:/o:eurolinux:eurolinux:8"
+ HOME_URL="https://www.euro-linux.com/"
+ BUG_REPORT_URL="https://github.com/EuroLinux/eurolinux-distro-bugs-and-rfc/"
+ REDHAT_SUPPORT_PRODUCT="EuroLinux"
+ REDHAT_SUPPORT_PRODUCT_VERSION="8"
+""")
+
OS_RELEASE_ROCKY_8 = dedent("""\
NAME="Rocky Linux"
VERSION="8.3 (Green Obsidian)"
@@ -140,6 +172,20 @@ OS_RELEASE_ROCKY_8 = dedent("""\
ROCKY_SUPPORT_PRODUCT_VERSION="8"
""")
+OS_RELEASE_VIRTUOZZO_8 = dedent("""\
+ NAME="Virtuozzo Linux"
+ VERSION="8"
+ ID="virtuozzo"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="8"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="Virtuozzo Linux"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:virtuozzoproject:vzlinux:8"
+ HOME_URL="https://www.vzlinux.org"
+ BUG_REPORT_URL="https://bugs.openvz.org"
+""")
+
REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)"
REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)"
REDHAT_RELEASE_REDHAT_6 = (
@@ -148,8 +194,12 @@ REDHAT_RELEASE_REDHAT_7 = (
"Red Hat Enterprise Linux Server release 7.5 (Maipo)")
REDHAT_RELEASE_ALMALINUX_8 = (
"AlmaLinux release 8.3 (Purple Manul)")
+REDHAT_RELEASE_EUROLINUX_7 = "EuroLinux release 7.9 (Minsk)"
+REDHAT_RELEASE_EUROLINUX_8 = "EuroLinux release 8.4 (Vaduz)"
REDHAT_RELEASE_ROCKY_8 = (
"Rocky Linux release 8.3 (Green Obsidian)")
+REDHAT_RELEASE_VIRTUOZZO_8 = (
+ "Virtuozzo Linux release 8")
OS_RELEASE_DEBIAN = dedent("""\
PRETTY_NAME="Debian GNU/Linux 9 (stretch)"
@@ -566,6 +616,38 @@ class TestGetLinuxDistro(CiTestCase):
self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist)
@mock.patch('cloudinit.util.load_file')
+ def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists):
+ """Verify eurolinux 7 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_7
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists):
+ """Verify eurolinux 7 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_EUROLINUX_7
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify eurolinux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists):
+ """Verify eurolinux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_EUROLINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists):
"""Verify rocky linux 8 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_ROCKY_8
@@ -582,6 +664,22 @@ class TestGetLinuxDistro(CiTestCase):
self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist)
@mock.patch('cloudinit.util.load_file')
+ def test_get_linux_virtuozzo8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify virtuozzo linux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_VIRTUOZZO_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_virtuozzo8_osrelease(self, m_os_release, m_path_exists):
+ """Verify virtuozzo linux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_VIRTUOZZO_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
def test_get_linux_debian(self, m_os_release, m_path_exists):
"""Verify we get the correct name and release name on Debian."""
m_os_release.return_value = OS_RELEASE_DEBIAN
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 7995c6c8..c53f6453 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -35,6 +35,7 @@ from base64 import b64decode, b64encode
from errno import ENOENT
from functools import lru_cache
from urllib import parse
+from typing import List
from cloudinit import importer
from cloudinit import log as logging
@@ -453,9 +454,19 @@ def _parse_redhat_release(release_file=None):
redhat_regex = (
r'(?P<name>.+) release (?P<version>[\d\.]+) '
r'\((?P<codename>[^)]+)\)')
+
+ # Virtuozzo deviates here
+ if "Virtuozzo" in redhat_release:
+ redhat_regex = r'(?P<name>.+) release (?P<version>[\d\.]+)'
+
match = re.match(redhat_regex, redhat_release)
if match:
group = match.groupdict()
+
+ # Virtuozzo has no codename in this file
+ if "Virtuozzo" in group['name']:
+ group['codename'] = group['name']
+
group['name'] = group['name'].lower().partition(' linux')[0]
if group['name'] == 'red hat enterprise':
group['name'] = 'redhat'
@@ -470,9 +481,11 @@ def get_linux_distro():
distro_version = ''
flavor = ''
os_release = {}
+ os_release_rhel = False
if os.path.exists('/etc/os-release'):
os_release = load_shell_content(load_file('/etc/os-release'))
if not os_release:
+ os_release_rhel = True
os_release = _parse_redhat_release()
if os_release:
distro_name = os_release.get('ID', '')
@@ -485,6 +498,9 @@ def get_linux_distro():
flavor = platform.machine()
elif distro_name == 'photon':
flavor = os_release.get('PRETTY_NAME', '')
+ elif distro_name == 'virtuozzo' and not os_release_rhel:
+            # Only use PRETTY_NAME when the info came from os-release,
+            # not from the redhat-release fallback
+ flavor = os_release.get('PRETTY_NAME', '')
else:
flavor = os_release.get('VERSION_CODENAME', '')
if not flavor:
@@ -532,8 +548,8 @@ def system_info():
if system == "linux":
linux_dist = info['dist'][0].lower()
if linux_dist in (
- 'almalinux', 'alpine', 'arch', 'centos', 'debian', 'fedora',
- 'photon', 'rhel', 'rocky', 'suse'):
+ 'almalinux', 'alpine', 'arch', 'centos', 'debian', 'eurolinux',
+ 'fedora', 'photon', 'rhel', 'rocky', 'suse', 'virtuozzo'):
var = linux_dist
elif linux_dist in ('ubuntu', 'linuxmint', 'mint'):
var = 'ubuntu'
@@ -1863,6 +1879,53 @@ def chmod(path, mode):
os.chmod(path, real_mode)
+def get_permissions(path: str) -> int:
+ """
+    Returns the octal permissions of the file/folder pointed to by the path,
+ encoded as an int.
+
+ @param path: The full path of the file/folder.
+ """
+
+ return stat.S_IMODE(os.stat(path).st_mode)
+
+
+def get_owner(path: str) -> str:
+ """
+    Returns the owner of the file/folder pointed to by the path.
+
+ @param path: The full path of the file/folder.
+ """
+ st = os.stat(path)
+ return pwd.getpwuid(st.st_uid).pw_name
+
+
+def get_group(path: str) -> str:
+ """
+    Returns the group of the file/folder pointed to by the path.
+
+ @param path: The full path of the file/folder.
+ """
+ st = os.stat(path)
+ return grp.getgrgid(st.st_gid).gr_name
+
+
+def get_user_groups(username: str) -> List[str]:
+ """
+ Returns a list of all groups to which the user belongs
+
+ @param username: the user we want to check
+ """
+ groups = []
+ for group in grp.getgrall():
+ if username in group.gr_mem:
+ groups.append(group.gr_name)
+
+ gid = pwd.getpwnam(username).pw_gid
+ groups.append(grp.getgrgid(gid).gr_name)
+ return groups
+
+
def write_file(
filename,
content,
@@ -1889,8 +1952,7 @@ def write_file(
if preserve_mode:
try:
- file_stat = os.stat(filename)
- mode = stat.S_IMODE(file_stat.st_mode)
+ mode = get_permissions(filename)
except OSError:
pass
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index cb2a625b..825deff4 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -18,9 +18,10 @@ users:
- default
{% endif %}
-# VMware guest customization.
{% if variant in ["photon"] %}
+# VMware guest customization.
disable_vmware_customization: true
+manage_etc_hosts: false
{% endif %}
# If this is set, 'root' will not be able to ssh in and they
@@ -31,8 +32,8 @@ disable_root: false
disable_root: true
{% endif %}
-{% if variant in ["almalinux", "alpine", "amazon", "centos", "fedora",
- "rhel", "rocky"] %}
+{% if variant in ["almalinux", "alpine", "amazon", "centos", "eurolinux",
+ "fedora", "rhel", "rocky", "virtuozzo"] %}
mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
{% if variant == "amazon" %}
resize_rootfs: noblock
@@ -173,8 +174,8 @@ cloud_final_modules:
system_info:
# This will affect which distro class gets used
{% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "debian",
- "fedora", "freebsd", "netbsd", "openbsd", "photon", "rhel",
- "rocky", "suse", "ubuntu"] %}
+ "eurolinux", "fedora", "freebsd", "netbsd", "openbsd",
+ "photon", "rhel", "rocky", "suse", "ubuntu", "virtuozzo"] %}
distro: {{ variant }}
{% elif variant in ["dragonfly"] %}
distro: dragonflybsd
@@ -227,8 +228,8 @@ system_info:
primary: http://ports.ubuntu.com/ubuntu-ports
security: http://ports.ubuntu.com/ubuntu-ports
ssh_svcname: ssh
-{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "fedora",
- "rhel", "rocky", "suse"] %}
+{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "eurolinux",
+ "fedora", "rhel", "rocky", "suse", "virtuozzo"] %}
# Default user name + that default users groups (if added/used)
default_user:
{% if variant == "amazon" %}
@@ -306,10 +307,15 @@ system_info:
paths:
cloud_dir: /var/lib/cloud/
templates_dir: /etc/cloud/templates/
+ network:
+ renderers: ['networkd']
ssh_svcname: sshd
-#manage_etc_hosts: true
+   # If set to true, cloud-init will not use fallback network config.
+   # Photon ships with default network settings, so if network settings
+   # are not explicitly given in the metadata, there is no need for the
+   # fallback network config.
+   disable_fallback_netcfg: true
{% endif %}
{% if variant in ["freebsd", "netbsd", "openbsd"] %}
network:
diff --git a/debian/changelog b/debian/changelog
index a7ef2618..7055ed68 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,46 @@
+cloud-init (21.2-69-g65607405-0ubuntu1) impish; urgency=medium
+
+ * d/cloud-init.templates: Add VMware to default datasource_list
+ * d/control: Add dependencies on python3-netifaces for vmware ds
+ * New upstream snapshot.
+ - Only invoke hotplug socket when functionality is enabled (#952)
+    - Revert unnecessary lcase in ds-identify (#978) [Andrew Kutz]
+ - cc_resolv_conf: fix typos (#969) [Shreenidhi Shedi]
+ - Replace broken httpretty tests with mock (SC-324) (#973)
+ - Azure: Check if interface is up after sleep when trying to bring it up
+ (#972) [aswinrajamannar]
+ - Update dscheck_VMware's rpctool check (#970) [Shreenidhi Shedi]
+ - Azure: Logging the detected interfaces (#968) [Moustafa Moustafa]
+ - Change netifaces dependency to 0.10.4 (#965) [Andrew Kutz]
+ - Azure: Limit polling network metadata on connection errors (#961)
+ [aswinrajamannar]
+ - Update inconsistent indentation (#962) [Andrew Kutz]
+ - cc_puppet: support AIO installations and more (#960) [Gabriel Nagy]
+ - Add Puppet contributors to CLA signers (#964) [Noah Fontes]
+ - Datasource for VMware (#953) [Andrew Kutz]
+ - photon: refactor hostname handling and add networkd activator (#958)
+ [sshedi]
+ - Stop copying ssh system keys and check folder permissions (#956)
+ [Emanuele Giuseppe Esposito]
+ - testing: port remaining cloud tests to integration testing framework
+ (SC-191) (#955)
+ - generate contents for ovf-env.xml when provisioning via IMDS (#959)
+ [Anh Vo]
+ - Add support for EuroLinux 7 && EuroLinux 8 (#957) [Aleksander Baranowski]
+ - Implementing device_aliases as described in docs (#945)
+ [Mal Graty] (LP: #1867532)
+ - testing: fix test_ssh_import_id.py (#954)
+ - Add ability to manage fallback network config on PhotonOS (#941) [sshedi]
+ - Add VZLinux support (#951) [eb3095]
+ - VMware: add network-config support in ovf-env.xml (#947) [PengpengSun]
+ - Update pylint to v2.9.3 and fix the new issues it spots (#946)
+ [Paride Legovini]
+    - Azure: mount default provisioning iso before trying device listing
+ [Anh Vo]
+ - Document known hotplug limitations (#950)
+
+ -- James Falcon <james.falcon@canonical.com> Fri, 13 Aug 2021 15:37:31 -0500
+
cloud-init (21.2-43-g184c836a-0ubuntu1) impish; urgency=medium
* New upstream snapshot.
diff --git a/debian/cloud-init.templates b/debian/cloud-init.templates
index 13f6df8d..75ac07b8 100644
--- a/debian/cloud-init.templates
+++ b/debian/cloud-init.templates
@@ -1,8 +1,8 @@
Template: cloud-init/datasources
Type: multiselect
-Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, Vultr, None
-Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, Vultr, None
-__Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com, SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Scaleway: Scaleway metadata service, AliYun: Alibaba metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, Hetzner: Hetzner Cloud, IBMCloud: IBM Cloud. Previously softlayer or bluemix., Oracle: Oracle Compute Infrastructure, Exoscale: Exoscale, RbxCloud: HyperOne and Rootbox platforms, UpCloud: UpCloud, Vultr: Vultr Cloud, None: Failsafe datasource
+Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, VMware, Vultr, None
+Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, VMware, Vultr, None
+__Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com, SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Scaleway: Scaleway metadata service, AliYun: Alibaba metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, Hetzner: Hetzner Cloud, IBMCloud: IBM Cloud. Previously softlayer or bluemix., Oracle: Oracle Compute Infrastructure, Exoscale: Exoscale, RbxCloud: HyperOne and Rootbox platforms, UpCloud: UpCloud, VMware: reads data from guestinfo table or env vars, Vultr: Vultr Cloud, None: Failsafe datasource
_Description: Which data sources should be searched?
Cloud-init supports searching different "Data Sources" for information
that it uses to configure a cloud instance.
diff --git a/debian/control b/debian/control
index 6c739892..8fae4be8 100644
--- a/debian/control
+++ b/debian/control
@@ -15,6 +15,7 @@ Build-Depends: debhelper-compat (= 13),
python3-jsonpatch,
python3-jsonschema,
python3-mock,
+ python3-netifaces,
python3-oauthlib,
python3-pytest,
python3-requests,
@@ -35,6 +36,7 @@ Depends: cloud-guest-utils | cloud-utils,
netplan.io,
procps,
python3,
+ python3-netifaces,
python3-requests,
python3-serial,
${misc:Depends},
diff --git a/debian/po/templates.pot b/debian/po/templates.pot
index f43ac245..fb0df2c0 100644
--- a/debian/po/templates.pot
+++ b/debian/po/templates.pot
@@ -158,6 +158,12 @@ msgstr ""
#. Type: multiselect
#. Choices
#: ../cloud-init.templates:1001
+msgid "VMware: reads data from guestinfo table or env vars"
+msgstr ""
+
+#. Type: multiselect
+#. Choices
+#: ../cloud-init.templates:1001
msgid "Vultr: Vultr Cloud"
msgstr ""
diff --git a/doc/examples/cloud-config-puppet.txt b/doc/examples/cloud-config-puppet.txt
index 3c7e2da7..c6bc15de 100644
--- a/doc/examples/cloud-config-puppet.txt
+++ b/doc/examples/cloud-config-puppet.txt
@@ -1,25 +1,65 @@
#cloud-config
#
-# This is an example file to automatically setup and run puppetd
+# This is an example file to automatically set up and run puppet
# when the instance boots for the first time.
# Make sure that this file is valid yaml before starting instances.
# It should be passed as user-data when starting the instance.
puppet:
+ # Boolean: whether or not to install puppet (default: true)
+ install: true
+
+ # A specific version to pass to the installer script or package manager
+ version: "7.7.0"
+
+ # Valid values are 'packages' and 'aio' (default: 'packages')
+ install_type: "packages"
+
+ # Puppet collection to install if 'install_type' is 'aio'
+ collection: "puppet7"
+
+ # Boolean: whether or not to remove the puppetlabs repo after installation
+ # if 'install_type' is 'aio' (default: true)
+ cleanup: true
+
+  # If 'install_type' is 'aio', change the url of the install script
+ aio_install_url: "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh"
+
+ # Path to the puppet config file (default: depends on 'install_type')
+ conf_file: "/etc/puppet/puppet.conf"
+
+ # Path to the puppet SSL directory (default: depends on 'install_type')
+ ssl_dir: "/var/lib/puppet/ssl"
+
+ # Path to the CSR attributes file (default: depends on 'install_type')
+ csr_attributes_path: "/etc/puppet/csr_attributes.yaml"
+
+ # The name of the puppet package to install (no-op if 'install_type' is 'aio')
+ package_name: "puppet"
+
+ # Boolean: whether or not to run puppet after configuration finishes
+ # (default: false)
+ exec: false
+
+ # A list of arguments to pass to 'puppet agent' if 'exec' is true
+ # (default: ['--test'])
+ exec_args: ['--test']
+
# Every key present in the conf object will be added to puppet.conf:
# [name]
# subkey=value
#
# For example the configuration below will have the following section
# added to puppet.conf:
- # [puppetd]
- # server=puppetmaster.example.org
+ # [main]
+ # server=puppetserver.example.org
# certname=i-0123456.ip-X-Y-Z.cloud.internal
#
- # The puppmaster ca certificate will be available in
- # /var/lib/puppet/ssl/certs/ca.pem
+ # The puppetserver ca certificate will be available in
+ # /var/lib/puppet/ssl/certs/ca.pem if using distro packages
+ # or /etc/puppetlabs/puppet/ssl/certs/ca.pem if using AIO packages.
conf:
agent:
- server: "puppetmaster.example.org"
+ server: "puppetserver.example.org"
# certname supports substitutions at runtime:
# %i: instanceid
# Example: i-0123456
@@ -29,11 +69,13 @@ puppet:
# NB: the certname will automatically be lowercased as required by puppet
certname: "%i.%f"
# ca_cert is a special case. It won't be added to puppet.conf.
- # It holds the puppetmaster certificate in pem format.
+ # It holds the puppetserver certificate in pem format.
# It should be a multi-line string (using the | yaml notation for
# multi-line strings).
- # The puppetmaster certificate is located in
- # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host.
+ # The puppetserver certificate is located in
+ # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetserver host if using
+ # distro packages or /etc/puppetlabs/puppet/ssl/ca/ca_crt.pem if using AIO
+ # packages.
#
ca_cert: |
-----BEGIN CERTIFICATE-----
diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst
index a45a49d6..71827177 100644
--- a/doc/rtd/topics/availability.rst
+++ b/doc/rtd/topics/availability.rst
@@ -26,7 +26,8 @@ OpenBSD and DragonFlyBSD:
- Gentoo Linux
- NetBSD
- OpenBSD
-- RHEL/CentOS
+- Photon OS
+- RHEL/CentOS/AlmaLinux/Rocky Linux/EuroLinux
- SLES/openSUSE
- Ubuntu
@@ -66,5 +67,6 @@ Additionally, cloud-init is supported on these private clouds:
- LXD
- KVM
- Metal-as-a-Service (MAAS)
+- VMware
.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 497b1467..f5aee1c2 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -50,6 +50,7 @@ The following is a list of documents for each supported datasource:
datasources/upcloud.rst
datasources/zstack.rst
datasources/vultr.rst
+ datasources/vmware.rst
Creation
========
diff --git a/doc/rtd/topics/datasources/vmware.rst b/doc/rtd/topics/datasources/vmware.rst
new file mode 100644
index 00000000..996eb61f
--- /dev/null
+++ b/doc/rtd/topics/datasources/vmware.rst
@@ -0,0 +1,359 @@
+.. _datasource_vmware:
+
+VMware
+======
+
+This datasource is for use with systems running on a VMware platform such as
+vSphere and currently supports the following data transports:
+
+* `GuestInfo <https://github.com/vmware/govmomi/blob/master/govc/USAGE.md#vmchange>`_ keys
+
+Configuration
+-------------
+
+The configuration method is dependent upon the transport:
+
+GuestInfo Keys
+^^^^^^^^^^^^^^
+
+One method of providing meta, user, and vendor data is by setting the following
+key/value pairs on a VM's ``extraConfig`` `property <https://vdc-repo.vmware.com/vmwb-repository/dcr-public/723e7f8b-4f21-448b-a830-5f22fd931b01/5a8257bd-7f41-4423-9a73-03307535bd42/doc/vim.vm.ConfigInfo.html>`_ :
+
+.. list-table::
+ :header-rows: 1
+
+ * - Property
+ - Description
+ * - ``guestinfo.metadata``
+ - A YAML or JSON document containing the cloud-init metadata.
+ * - ``guestinfo.metadata.encoding``
+ - The encoding type for ``guestinfo.metadata``.
+ * - ``guestinfo.userdata``
+ - A YAML document containing the cloud-init user data.
+ * - ``guestinfo.userdata.encoding``
+ - The encoding type for ``guestinfo.userdata``.
+ * - ``guestinfo.vendordata``
+ - A YAML document containing the cloud-init vendor data.
+ * - ``guestinfo.vendordata.encoding``
+ - The encoding type for ``guestinfo.vendordata``.
+
+All ``guestinfo.*.encoding`` values may be set to ``base64`` or
+``gzip+base64``.
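+
+For reference, a ``gzip+base64`` value can be produced with a few lines of
+Python. This is only an illustration of the encoding scheme (the file name is
+arbitrary), not part of cloud-init itself:
+
+.. code-block:: python
+
+   import base64
+   import gzip
+
+   with open("metadata.yaml", "rb") as f:
+       raw = f.read()
+
+   # Compress the document, then base64-encode it so it can be stored
+   # in a guestinfo key as printable text.
+   encoded = base64.b64encode(gzip.compress(raw)).decode("ascii")
+   print(encoded)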
+
+Features
+--------
+
+This section reviews several features available in this datasource, regardless
+of how the meta, user, and vendor data was discovered.
+
+Instance data and lazy networks
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+One of the hallmarks of cloud-init is `its use of instance-data and Jinja
+queries <../instancedata.html#using-instance-data>`_
+-- the ability to write queries in user and vendor data that reference runtime
+information present in ``/run/cloud-init/instance-data.json``. This works well
+when the metadata provides all of the information up front, such as the network
+configuration. For systems that rely on DHCP, however, this information may not
+be available when the metadata is persisted to disk.
+
+This datasource ensures that even if the instance is using DHCP to configure
+networking, the same details about the configured network are available in
+``/run/cloud-init/instance-data.json`` as if static networking were used. The
+information collected at runtime is easy to demonstrate by executing the
+datasource on the command line. From the root of this repository, run the
+following command:
+
+.. code-block:: bash
+
+ PYTHONPATH="$(pwd)" python3 cloudinit/sources/DataSourceVMware.py
+
+The above command will result in output similar to the following JSON:
+
+.. code-block:: json
+
+ {
+ "hostname": "akutz.localhost",
+ "local-hostname": "akutz.localhost",
+ "local-ipv4": "192.168.0.188",
+ "local_hostname": "akutz.localhost",
+ "network": {
+ "config": {
+ "dhcp": true
+ },
+ "interfaces": {
+ "by-ipv4": {
+ "172.0.0.2": {
+ "netmask": "255.255.255.255",
+ "peer": "172.0.0.2"
+ },
+ "192.168.0.188": {
+ "broadcast": "192.168.0.255",
+ "mac": "64:4b:f0:18:9a:21",
+ "netmask": "255.255.255.0"
+ }
+ },
+ "by-ipv6": {
+ "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2": {
+ "flags": 208,
+ "mac": "64:4b:f0:18:9a:21",
+ "netmask": "ffff:ffff:ffff:ffff::/64"
+ }
+ },
+ "by-mac": {
+ "64:4b:f0:18:9a:21": {
+ "ipv4": [
+ {
+ "addr": "192.168.0.188",
+ "broadcast": "192.168.0.255",
+ "netmask": "255.255.255.0"
+ }
+ ],
+ "ipv6": [
+ {
+ "addr": "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2",
+ "flags": 208,
+ "netmask": "ffff:ffff:ffff:ffff::/64"
+ }
+ ]
+ },
+ "ac:de:48:00:11:22": {
+ "ipv6": []
+ }
+ }
+ }
+ },
+ "wait-on-network": {
+ "ipv4": true,
+        "ipv6": false
+ }
+ }
+
+
+Redacting sensitive information
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Sometimes the cloud-init userdata might contain sensitive information, and it
+may be desirable to have the ``guestinfo.userdata`` key (or other guestinfo
+keys) redacted as soon as its data is read by the datasource. This is possible
+by adding the following to the metadata:
+
+.. code-block:: yaml
+
+ redact: # formerly named cleanup-guestinfo, which will also work
+ - userdata
+ - vendordata
+
+When the above snippet is added to the metadata, the datasource will iterate
+over the elements in the ``redact`` array and clear each of the keys. For
+example, when the guestinfo transport is used, the above snippet will cause
+the following commands to be executed:
+
+.. code-block:: shell
+
+ vmware-rpctool "info-set guestinfo.userdata ---"
+ vmware-rpctool "info-set guestinfo.userdata.encoding "
+ vmware-rpctool "info-set guestinfo.vendordata ---"
+ vmware-rpctool "info-set guestinfo.vendordata.encoding "
+
+Please note that keys are set to the valid YAML string ``---`` as it is not
+possible to remove an existing key from the guestinfo key-space. A key's
+analogous encoding property will be set to a single white-space character,
+causing the datasource to treat the actual key value as plain text, thereby
+loading it as an empty YAML doc (hence the aforementioned ``---``\ ).
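+
+In rough pseudocode, the clearing step looks like the sketch below. The
+``set_guestinfo`` callable is a stand-in for the transport-specific setter
+(for guestinfo, effectively ``vmware-rpctool "info-set ..."``) and is an
+assumption of this illustration, not a real cloud-init function:
+
+.. code-block:: python
+
+   def redact_keys(metadata, set_guestinfo):
+       # Clear each listed key and blank its encoding so the value is
+       # re-read as plain text, i.e. the empty YAML document "---".
+       for name in metadata.get("redact", []):
+           set_guestinfo("guestinfo." + name, "---")
+           set_guestinfo("guestinfo." + name + ".encoding", " ")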
+
+Reading the local IP addresses
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This datasource automatically discovers the local IPv4 and IPv6 addresses for
+a guest operating system based on the default routes. However, when inspecting
+a VM externally, it's not possible to know what the *default* IP address is for
+the guest OS. That's why this datasource sets the discovered local IPv4 and
+IPv6 addresses back in the guestinfo namespace as the following keys:
+
+* ``guestinfo.local-ipv4``
+* ``guestinfo.local-ipv6``
+
+It is possible that a host may not have any default local IP addresses, and
+the reported addresses may be link-local. Even so, these two keys may be used
+to discover what this datasource determined were the local IPv4 and IPv6
+addresses for a host.
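+
+The discovery itself can be approximated with the ``netifaces`` package, a
+dependency of this datasource. The sketch below covers only the IPv4 side and
+is not the datasource's exact implementation:
+
+.. code-block:: python
+
+   import netifaces
+
+   def default_local_ipv4():
+       # Find the interface that owns the default IPv4 route, then
+       # report its first assigned IPv4 address, if any.
+       gateways = netifaces.gateways().get("default", {})
+       if netifaces.AF_INET not in gateways:
+           return None
+       _gw_ip, dev = gateways[netifaces.AF_INET]
+       addrs = netifaces.ifaddresses(dev).get(netifaces.AF_INET, [])
+       return addrs[0]["addr"] if addrs else None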
+
+Waiting on the network
+^^^^^^^^^^^^^^^^^^^^^^
+
+Sometimes cloud-init may bring up the network, but it will not finish coming
+online before the datasource's ``setup`` function is called, resulting in a
+``/var/run/cloud-init/instance-data.json`` file that does not have the correct
+network information. It is possible to instruct the datasource to wait until an
+IPv4 or IPv6 address is available before writing the instance data with the
+following metadata properties:
+
+.. code-block:: yaml
+
+ wait-on-network:
+ ipv4: true
+ ipv6: true
+
+If either of the above values is true, then the datasource will sleep for one
+second, check the network status, and repeat until an address from each of
+the requested families is available.
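+
+Conceptually the wait is a simple poll loop, along the lines of the sketch
+below; ``get_local_addresses`` is a hypothetical placeholder for however the
+host's default addresses are looked up:
+
+.. code-block:: python
+
+   import time
+
+   def wait_on_network(want_ipv4, want_ipv6, get_local_addresses):
+       # Sleep one second between checks until every requested address
+       # family has at least one address available.
+       while True:
+           ipv4, ipv6 = get_local_addresses()
+           if (not want_ipv4 or ipv4) and (not want_ipv6 or ipv6):
+               return ipv4, ipv6
+           time.sleep(1)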
+
+Walkthrough
+-----------
+
+The following series of steps demonstrates how to configure a VM with
+this datasource:
+
+#. Create the metadata file for the VM. Save the following YAML to a file named
+ ``metadata.yaml``\ :
+
+ .. code-block:: yaml
+
+ instance-id: cloud-vm
+ local-hostname: cloud-vm
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+
+#. Create the userdata file ``userdata.yaml``\ :
+
+ .. code-block:: yaml
+
+ #cloud-config
+
+ users:
+ - default
+ - name: akutz
+ primary_group: akutz
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ groups: sudo, wheel
+ ssh_import_id: None
+ lock_passwd: true
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDE0c5FczvcGSh/tG4iw+Fhfi/O5/EvUM/96js65tly4++YTXK1d9jcznPS5ruDlbIZ30oveCBd3kT8LLVFwzh6hepYTf0YmCTpF4eDunyqmpCXDvVscQYRXyasEm5olGmVe05RrCJSeSShAeptv4ueIn40kZKOghinGWLDSZG4+FFfgrmcMCpx5YSCtX2gvnEYZJr0czt4rxOZuuP7PkJKgC/mt2PcPjooeX00vAj81jjU2f3XKrjjz2u2+KIt9eba+vOQ6HiC8c2IzRkUAJ5i1atLy8RIbejo23+0P4N2jjk17QySFOVHwPBDTYb0/0M/4ideeU74EN/CgVsvO6JrLsPBR4dojkV5qNbMNxIVv5cUwIy2ThlLgqpNCeFIDLCWNZEFKlEuNeSQ2mPtIO7ETxEL2Cz5y/7AIuildzYMc6wi2bofRC8HmQ7rMXRWdwLKWsR0L7SKjHblIwarxOGqLnUI+k2E71YoP7SZSlxaKi17pqkr0OMCF+kKqvcvHAQuwGqyumTEWOlH6TCx1dSPrW+pVCZSHSJtSTfDW2uzL6y8k10MT06+pVunSrWo5LHAXcS91htHV1M1UrH/tZKSpjYtjMb5+RonfhaFRNzvj7cCE1f3Kp8UVqAdcGBTtReoE8eRUT63qIxjw03a7VwAyB2w+9cu1R9/vAo8SBeRqw== sakutz@gmail.com
+
+#. Please note this step requires that the VM be powered off. All of the
+ commands below use the VMware CLI tool, `govc <https://github.com/vmware/govmomi/blob/master/govc>`_.
+
+ Go ahead and assign the path to the VM to the environment variable ``VM``\ :
+
+ .. code-block:: shell
+
+ export VM="/inventory/path/to/the/vm"
+
+#. Power off the VM:
+
+ .. raw:: html
+
+ <hr />
+
+ &#x26a0;&#xfe0f; <strong>First Boot Mode</strong>
+
+ To ensure the next power-on operation results in a first-boot scenario for
+ cloud-init, it may be necessary to run the following command just before
+ powering off the VM:
+
+ .. code-block:: bash
+
+ cloud-init clean
+
+ Otherwise cloud-init may not run in first-boot mode. For more information
+ on how the boot mode is determined, please see the
+ `First Boot Documentation <../boot.html#first-boot-determination>`_.
+
+ .. raw:: html
+
+ <hr />
+
+ .. code-block:: shell
+
+ govc vm.power -off "${VM}"
+
+#. Export the environment variables that contain the cloud-init metadata and
+   userdata:
+
+ .. code-block:: shell
+
+ export METADATA=$(gzip -c9 <metadata.yaml | { base64 -w0 2>/dev/null || base64; }) \
+ USERDATA=$(gzip -c9 <userdata.yaml | { base64 -w0 2>/dev/null || base64; })
+
+#. Assign the metadata and userdata to the VM:
+
+ .. code-block:: shell
+
+ govc vm.change -vm "${VM}" \
+ -e guestinfo.metadata="${METADATA}" \
+ -e guestinfo.metadata.encoding="gzip+base64" \
+ -e guestinfo.userdata="${USERDATA}" \
+ -e guestinfo.userdata.encoding="gzip+base64"
+
+ Please note the above commands include specifying the encoding for the
+ properties. This is important as it informs the datasource how to decode
+ the data for cloud-init. Valid values for ``metadata.encoding`` and
+ ``userdata.encoding`` include:
+
+ * ``base64``
+ * ``gzip+base64``
+
+#. Power on the VM:
+
+ .. code-block:: shell
+
+      govc vm.power -on "${VM}"
+
+If all went according to plan, the CentOS box is:
+
+* Locked down, allowing SSH access only for the user in the userdata
+* Configured for a dynamic IP address via DHCP
+* Assigned the hostname ``cloud-vm``
+
+Examples
+--------
+
+This section reviews common configurations:
+
+Setting the hostname
+^^^^^^^^^^^^^^^^^^^^
+
+The hostname is set by way of the metadata key ``local-hostname``.
+
+Setting the instance ID
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The instance ID may be set by way of the metadata key ``instance-id``. However,
+if this value is absent, the instance ID is read from the file
+``/sys/class/dmi/id/product_uuid``.
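+
+A minimal sketch of that fallback, assuming only the behavior described
+above (``PRODUCT_UUID_FILE_PATH`` is the constant the datasource uses for
+this path):
+
+.. code-block:: python
+
+   PRODUCT_UUID_FILE_PATH = "/sys/class/dmi/id/product_uuid"
+
+   def get_instance_id(metadata):
+       # Prefer an explicit instance-id from the metadata; otherwise
+       # fall back to the DMI product UUID exposed by the hypervisor.
+       if metadata.get("instance-id"):
+           return metadata["instance-id"]
+       with open(PRODUCT_UUID_FILE_PATH) as f:
+           return f.read().strip()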
+
+Providing public SSH keys
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The public SSH keys may be set by way of the metadata key ``public-keys-data``.
+Each newline-terminated string will be interpreted as a separate SSH public
+key, which will be placed in the distro's default user's
+``~/.ssh/authorized_keys``. If the value is empty or absent, nothing will
+be written to ``~/.ssh/authorized_keys``.
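+
+Splitting the value into individual keys is straightforward; a sketch of the
+behavior described above:
+
+.. code-block:: python
+
+   def parse_public_keys(public_keys_data):
+       # Each newline-terminated entry is one SSH public key; blank
+       # lines contribute nothing.
+       data = public_keys_data or ""
+       return [line for line in data.splitlines() if line.strip()]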
+
+Configuring the network
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The network is configured by setting the metadata key ``network`` with a value
+consistent with Network Config Versions
+`1 <../network-config-format-v1.html>`_ or
+`2 <../network-config-format-v2.html>`_\ , depending on the Linux
+distro's version of cloud-init.
+
+The metadata key ``network.encoding`` may be used to indicate the format of
+the metadata key "network". Valid encodings are ``base64`` and ``gzip+base64``.
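+
+Decoding mirrors the handling of the other guestinfo keys; a hedged sketch of
+how such a value might be unpacked, not cloud-init's exact helper:
+
+.. code-block:: python
+
+   import base64
+   import gzip
+
+   def decode_value(value, encoding):
+       # Undo the declared encoding; an empty or absent encoding means
+       # the value is already plain text.
+       if encoding == "base64":
+           return base64.b64decode(value)
+       if encoding == "gzip+base64":
+           return gzip.decompress(base64.b64decode(value))
+       return value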
diff --git a/doc/rtd/topics/events.rst b/doc/rtd/topics/events.rst
index 984e7577..57797bd9 100644
--- a/doc/rtd/topics/events.rst
+++ b/doc/rtd/topics/events.rst
@@ -71,6 +71,11 @@ user data, cloud-init will respond to the addition or removal of network
interfaces to the system. In addition to fetching and updating the system
metadata, cloud-init will also bring up/down the newly added interface.
+.. warning:: Due to its use of systemd sockets, hotplug functionality
+   is currently incompatible with SELinux. This issue is being tracked
+   `on Launchpad`_. Additionally, hotplug support is considered experimental
+   for non-Debian-based systems.
+
Examples
========
@@ -83,7 +88,8 @@ On every boot, apply network configuration found in the datasource.
# apply network config on every boot
updates:
network:
- when: ['boot', 'hotplug']
+ when: ['boot']
.. _Cloud-init: https://launchpad.net/cloud-init
+.. _on Launchpad: https://bugs.launchpad.net/cloud-init/+bug/1936229
.. vi: textwidth=78
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
index 5f7a74f8..8eb7a31b 100644
--- a/doc/rtd/topics/network-config.rst
+++ b/doc/rtd/topics/network-config.rst
@@ -104,6 +104,13 @@ interface given the information it has available.
Finally after selecting the "right" interface, a configuration is
generated and applied to the system.
+.. note::
+
+   PhotonOS disables fallback networking configuration by default, leaving
+   the network unrendered when no other network config is provided.
+   If fallback config is still desired on PhotonOS, it can be enabled by
+   providing ``disable_fallback_netcfg: false`` in the
+   ``/etc/cloud/cloud.cfg:sys_config`` settings.
Network Configuration Sources
=============================
diff --git a/doc/sources/ovf/example/ovf-env.xml b/doc/sources/ovf/example/ovf-env.xml
index 13e8f104..4ef4ee63 100644
--- a/doc/sources/ovf/example/ovf-env.xml
+++ b/doc/sources/ovf/example/ovf-env.xml
@@ -41,6 +41,14 @@
-->
<Property oe:key="user-data" oe:value="IyEvYmluL3NoCmVjaG8gImhpIHdvcmxkIgo="/>
<Property oe:key="password" oe:value="passw0rd"/>
+ <!--
+    network-config is optional; it can only be read from VMware guestinfo.ovfEnv.
+    The value for network-config must be base64 encoded.
+    It will be decoded and then processed normally as network-config.
+    Set ovf-env.xml as VMware guestinfo.ovfEnv with the command below:
+    'vmware-rpctool "info-set guestinfo.ovfEnv `cat ./ovf-env.xml`"'
+ -->
+ <Property oe:key="network-config" oe:value="bmV0d29yazoKICB2ZXJzaW9uOiAyCiAgZXRoZXJuZXRzOgogICAgbmljczoKICAgICAgbWF0Y2g6CiAgICAgICAgbmFtZTogZXRoKgogICAgICBkaGNwNDogeWVz"/>
</PropertySection>
</Environment>
diff --git a/packages/pkg-deps.json b/packages/pkg-deps.json
index 80028396..eaf13469 100644
--- a/packages/pkg-deps.json
+++ b/packages/pkg-deps.json
@@ -27,6 +27,20 @@
"sudo"
]
},
+ "eurolinux" : {
+ "build-requires" : [
+ "python3-devel"
+ ],
+ "requires" : [
+ "e2fsprogs",
+ "iproute",
+ "net-tools",
+ "procps",
+ "rsyslog",
+ "shadow-utils",
+ "sudo"
+ ]
+ },
"redhat" : {
"build-requires" : [
"python3-devel"
diff --git a/requirements.txt b/requirements.txt
index 5817da3b..c4adc455 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -32,3 +32,12 @@ jsonpatch
# For validating cloud-config sections per schema definitions
jsonschema
+
+# Used by DataSourceVMware to inspect the host's network configuration during
+# the "setup()" function.
+#
+# This allows a host that uses DHCP to bring up the network during BootLocal
+# and still participate in instance-data by gathering the network in detail at
+# runtime, merging that information into the metadata, and persisting it back
+# to disk.
+netifaces>=0.10.4
diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl
index 0713db16..3dbe5947 100644
--- a/systemd/cloud-init-generator.tmpl
+++ b/systemd/cloud-init-generator.tmpl
@@ -83,7 +83,8 @@ default() {
check_for_datasource() {
local ds_rc=""
-{% if variant in ["almalinux", "rhel", "fedora", "centos", "rocky"] %}
+{% if variant in ["almalinux", "centos", "eurolinux", "fedora", "rhel",
+ "rocky", "virtuozzo"] %}
local dsidentify="/usr/libexec/cloud-init/ds-identify"
{% else %}
local dsidentify="/usr/lib/cloud-init/ds-identify"
diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl
index c773e411..636f59be 100644
--- a/systemd/cloud-init.service.tmpl
+++ b/systemd/cloud-init.service.tmpl
@@ -12,7 +12,8 @@ After=systemd-networkd-wait-online.service
{% if variant in ["ubuntu", "unknown", "debian"] %}
After=networking.service
{% endif %}
-{% if variant in ["almalinux", "centos", "fedora", "rhel", "rocky"] %}
+{% if variant in ["almalinux", "centos", "eurolinux", "fedora", "rhel",
+ "rocky", "virtuozzo"] %}
After=network.service
After=NetworkManager.service
{% endif %}
diff --git a/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml b/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml
index e366c042..cdb1c28d 100644
--- a/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml
+++ b/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml
@@ -14,14 +14,14 @@ cloud_config: |
# For example the configuration below will have the following section
# added to puppet.conf:
# [puppetd]
- # server=puppetmaster.example.org
+ # server=puppetserver.example.org
# certname=i-0123456.ip-X-Y-Z.cloud.internal
#
# The puppmaster ca certificate will be available in
# /var/lib/puppet/ssl/certs/ca.pem
conf:
agent:
- server: "puppetmaster.example.org"
+ server: "puppetserver.example.org"
# certname supports substitutions at runtime:
# %i: instanceid
# Example: i-0123456
@@ -31,11 +31,11 @@ cloud_config: |
# NB: the certname will automatically be lowercased as required by puppet
certname: "%i.%f"
# ca_cert is a special case. It won't be added to puppet.conf.
- # It holds the puppetmaster certificate in pem format.
+ # It holds the puppetserver certificate in pem format.
# It should be a multi-line string (using the | yaml notation for
# multi-line strings).
- # The puppetmaster certificate is located in
- # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host.
+ # The puppetserver certificate is located in
+ # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetserver host.
#
ca_cert: |
-----BEGIN CERTIFICATE-----
diff --git a/tests/integration_tests/bugs/test_lp1920939.py b/tests/integration_tests/bugs/test_lp1920939.py
deleted file mode 100644
index 408792a6..00000000
--- a/tests/integration_tests/bugs/test_lp1920939.py
+++ /dev/null
@@ -1,140 +0,0 @@
-"""
-Test that disk setup can run successfully on a mounted partition when
-partprobe is being used.
-
-lp-1920939
-"""
-import json
-import os
-import pytest
-from uuid import uuid4
-from pycloudlib.lxd.instance import LXDInstance
-
-from cloudinit.subp import subp
-from tests.integration_tests.instances import IntegrationInstance
-
-DISK_PATH = '/tmp/test_disk_setup_{}'.format(uuid4())
-
-
-def setup_and_mount_lxd_disk(instance: LXDInstance):
- subp('lxc config device add {} test-disk-setup-disk disk source={}'.format(
- instance.name, DISK_PATH).split())
-
-
-@pytest.yield_fixture
-def create_disk():
- # 640k should be enough for anybody
- subp('dd if=/dev/zero of={} bs=1k count=640'.format(DISK_PATH).split())
- yield
- os.remove(DISK_PATH)
-
-
-USERDATA = """\
-#cloud-config
-disk_setup:
- /dev/sdb:
- table_type: mbr
- layout: [50, 50]
- overwrite: True
-fs_setup:
- - label: test
- device: /dev/sdb1
- filesystem: ext4
- - label: test2
- device: /dev/sdb2
- filesystem: ext4
-mounts:
-- ["/dev/sdb1", "/mnt1"]
-- ["/dev/sdb2", "/mnt2"]
-"""
-
-UPDATED_USERDATA = """\
-#cloud-config
-disk_setup:
- /dev/sdb:
- table_type: mbr
- layout: [100]
- overwrite: True
-fs_setup:
- - label: test3
- device: /dev/sdb1
- filesystem: ext4
-mounts:
-- ["/dev/sdb1", "/mnt3"]
-"""
-
-
-def _verify_first_disk_setup(client, log):
- assert 'Traceback' not in log
- assert 'WARN' not in log
- lsblk = json.loads(client.execute('lsblk --json'))
- sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0]
- assert len(sdb['children']) == 2
- assert sdb['children'][0]['name'] == 'sdb1'
- assert sdb['children'][0]['mountpoint'] == '/mnt1'
- assert sdb['children'][1]['name'] == 'sdb2'
- assert sdb['children'][1]['mountpoint'] == '/mnt2'
-
-
-@pytest.mark.user_data(USERDATA)
-@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk)
-@pytest.mark.ubuntu
-@pytest.mark.lxd_vm
-# Not bionic or xenial because the LXD agent gets in the way of us
-# changing the userdata
-@pytest.mark.not_bionic
-@pytest.mark.not_xenial
-def test_disk_setup_when_mounted(create_disk, client: IntegrationInstance):
- """Test lp-1920939.
-
- We insert an extra disk into our VM, format it to have two partitions,
- modify our cloud config to mount devices before disk setup, and modify
- our userdata to setup a single partition on the disk.
-
- This allows cloud-init to attempt disk setup on a mounted partition.
- When blockdev is in use, it will fail with
- "blockdev: ioctl error on BLKRRPART: Device or resource busy" along
- with a warning and a traceback. When partprobe is in use, everything
- should work successfully.
- """
- log = client.read_from_file('/var/log/cloud-init.log')
- _verify_first_disk_setup(client, log)
-
- # Update our userdata and cloud.cfg to mount then perform new disk setup
- client.write_to_file(
- '/var/lib/cloud/seed/nocloud-net/user-data',
- UPDATED_USERDATA
- )
- client.execute("sed -i 's/write-files/write-files\\n - mounts/' "
- "/etc/cloud/cloud.cfg")
-
- client.execute('cloud-init clean --logs')
- client.restart()
-
- # Assert new setup works as expected
- assert 'Traceback' not in log
- assert 'WARN' not in log
-
- lsblk = json.loads(client.execute('lsblk --json'))
- sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0]
- assert len(sdb['children']) == 1
- assert sdb['children'][0]['name'] == 'sdb1'
- assert sdb['children'][0]['mountpoint'] == '/mnt3'
-
-
-@pytest.mark.user_data(USERDATA)
-@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk)
-@pytest.mark.ubuntu
-@pytest.mark.lxd_vm
-def test_disk_setup_no_partprobe(create_disk, client: IntegrationInstance):
- """Ensure disk setup still works as expected without partprobe."""
- # We can't do this part in a bootcmd because the path has already
- # been found by the time we get to the bootcmd
- client.execute('rm $(which partprobe)')
- client.execute('cloud-init clean --logs')
- client.restart()
-
- log = client.read_from_file('/var/log/cloud-init.log')
- _verify_first_disk_setup(client, log)
-
- assert 'partprobe' not in log
diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py
new file mode 100644
index 00000000..97b59558
--- /dev/null
+++ b/tests/integration_tests/modules/test_combined.py
@@ -0,0 +1,175 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""A set of somewhat unrelated tests that can be combined into a single
+instance launch. Generally tests should only be added here if a failure
+of the test would be unlikely to affect the running of another test using
+the same instance launch. Most independent module coherence tests can go
+here.
+"""
+import json
+import pytest
+import re
+from datetime import date
+
+from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_ordered_items_in_text
+
+USER_DATA = """\
+#cloud-config
+apt:
+ primary:
+ - arches: [default]
+ uri: http://us.archive.ubuntu.com/ubuntu/
+byobu_by_default: enable
+final_message: |
+ This is my final message!
+ $version
+ $timestamp
+ $datasource
+ $uptime
+locale: en_GB.UTF-8
+locale_configfile: /etc/default/locale
+ntp:
+ servers: ['ntp.ubuntu.com']
+"""
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(USER_DATA)
+class TestCombined:
+ def test_final_message(self, class_client: IntegrationInstance):
+ """Test that final_message module works as expected.
+
+ Also tests LP 1511485: final_message is silent
+ """
+ client = class_client
+ log = client.read_from_file('/var/log/cloud-init.log')
+ today = date.today().strftime('%a, %d %b %Y')
+ expected = (
+ 'This is my final message!\n'
+ r'\d+\.\d+.*\n'
+ '{}.*\n'
+ 'DataSource.*\n'
+ r'\d+\.\d+'
+ ).format(today)
+
+ assert re.search(expected, log)
+
+ def test_ntp_with_apt(self, class_client: IntegrationInstance):
+ """LP #1628337.
+
+ cloud-init tries to install NTP before even
+ configuring the archives.
+ """
+ client = class_client
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'W: Failed to fetch' not in log
+ assert 'W: Some index files failed to download' not in log
+ assert 'E: Unable to locate package ntp' not in log
+
+ def test_byobu(self, class_client: IntegrationInstance):
+ """Test byobu configured as enabled by default."""
+ client = class_client
+ assert client.execute('test -e "/etc/byobu/autolaunch"').ok
+
+ def test_configured_locale(self, class_client: IntegrationInstance):
+ """Test locale can be configured correctly."""
+ client = class_client
+ default_locale = client.read_from_file('/etc/default/locale')
+ assert 'LANG=en_GB.UTF-8' in default_locale
+
+ locale_a = client.execute('locale -a')
+ verify_ordered_items_in_text([
+ 'en_GB.utf8',
+ 'en_US.utf8'
+ ], locale_a)
+
+ locale_gen = client.execute(
+ "cat /etc/locale.gen | grep -v '^#' | uniq"
+ )
+ verify_ordered_items_in_text([
+ 'en_GB.UTF-8',
+ 'en_US.UTF-8'
+ ], locale_gen)
+
+ def test_no_problems(self, class_client: IntegrationInstance):
+ """Test no errors, warnings, or tracebacks"""
+ client = class_client
+ status_file = client.read_from_file('/run/cloud-init/status.json')
+ status_json = json.loads(status_file)['v1']
+ for stage in ('init', 'init-local', 'modules-config', 'modules-final'):
+ assert status_json[stage]['errors'] == []
+ result_file = client.read_from_file('/run/cloud-init/result.json')
+ result_json = json.loads(result_file)['v1']
+ assert result_json['errors'] == []
+
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'WARN' not in log
+ assert 'Traceback' not in log
+
+ def _check_common_metadata(self, data):
+ assert data['base64_encoded_keys'] == []
+ assert data['merged_cfg'] == 'redacted for non-root user'
+
+ image_spec = ImageSpecification.from_os_image()
+ assert data['sys_info']['dist'][0] == image_spec.os
+
+ v1_data = data['v1']
+ assert re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release'])
+ assert v1_data['variant'] == image_spec.os
+ assert v1_data['distro'] == image_spec.os
+ assert v1_data['distro_release'] == image_spec.release
+ assert v1_data['machine'] == 'x86_64'
+        assert re.match(r'3\.\d+\.\d+', v1_data['python_version'])
+
+ @pytest.mark.lxd_container
+ def test_instance_json_lxd(self, class_client: IntegrationInstance):
+ client = class_client
+ instance_json_file = client.read_from_file(
+ '/run/cloud-init/instance-data.json')
+
+ data = json.loads(instance_json_file)
+ self._check_common_metadata(data)
+ v1_data = data['v1']
+ assert v1_data['cloud_name'] == 'unknown'
+ assert v1_data['platform'] == 'lxd'
+ assert v1_data['subplatform'] == (
+ 'seed-dir (/var/lib/cloud/seed/nocloud-net)')
+ assert v1_data['availability_zone'] is None
+ assert v1_data['instance_id'] == client.instance.name
+ assert v1_data['local_hostname'] == client.instance.name
+ assert v1_data['region'] is None
+
+ @pytest.mark.lxd_vm
+ def test_instance_json_lxd_vm(self, class_client: IntegrationInstance):
+ client = class_client
+ instance_json_file = client.read_from_file(
+ '/run/cloud-init/instance-data.json')
+
+ data = json.loads(instance_json_file)
+ self._check_common_metadata(data)
+ v1_data = data['v1']
+ assert v1_data['cloud_name'] == 'unknown'
+ assert v1_data['platform'] == 'lxd'
+ assert v1_data['subplatform'] == (
+ 'seed-dir (/var/lib/cloud/seed/nocloud-net)')
+ assert v1_data['availability_zone'] is None
+ assert v1_data['instance_id'] == client.instance.name
+ assert v1_data['local_hostname'] == client.instance.name
+ assert v1_data['region'] is None
+
+ @pytest.mark.ec2
+ def test_instance_json_ec2(self, class_client: IntegrationInstance):
+ client = class_client
+ instance_json_file = client.read_from_file(
+ '/run/cloud-init/instance-data.json')
+ data = json.loads(instance_json_file)
+ v1_data = data['v1']
+ assert v1_data['cloud_name'] == 'aws'
+ assert v1_data['platform'] == 'ec2'
+ assert v1_data['subplatform'].startswith('metadata')
+ assert v1_data[
+ 'availability_zone'] == client.instance.availability_zone
+ assert v1_data['instance_id'] == client.instance.name
+ assert v1_data['local_hostname'].startswith('ip-')
+ assert v1_data['region'] == client.cloud.cloud_instance.region
diff --git a/tests/integration_tests/modules/test_command_output.py b/tests/integration_tests/modules/test_command_output.py
new file mode 100644
index 00000000..15033642
--- /dev/null
+++ b/tests/integration_tests/modules/test_command_output.py
@@ -0,0 +1,23 @@
+"""Integration test for output redirection.
+
+This test redirects the output of a command to a file and then checks the file.
+
+(This is ported from
+``tests/cloud_tests/testcases/main/command_output_simple.yaml``.)"""
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+
+USER_DATA = """\
+#cloud-config
+output: { all: "| tee -a /var/log/cloud-init-test-output" }
+final_message: "should be last line in cloud-init-test-output file"
+"""
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(USER_DATA)
+def test_runcmd(client: IntegrationInstance):
+ log = client.read_from_file('/var/log/cloud-init-test-output')
+ assert 'should be last line in cloud-init-test-output file' in log
diff --git a/tests/integration_tests/modules/test_disk_setup.py b/tests/integration_tests/modules/test_disk_setup.py
new file mode 100644
index 00000000..1fc96c52
--- /dev/null
+++ b/tests/integration_tests/modules/test_disk_setup.py
@@ -0,0 +1,192 @@
+import json
+import os
+import pytest
+from uuid import uuid4
+from pycloudlib.lxd.instance import LXDInstance
+
+from cloudinit.subp import subp
+from tests.integration_tests.instances import IntegrationInstance
+
+DISK_PATH = '/tmp/test_disk_setup_{}'.format(uuid4())
+
+
+def setup_and_mount_lxd_disk(instance: LXDInstance):
+ subp('lxc config device add {} test-disk-setup-disk disk source={}'.format(
+ instance.name, DISK_PATH).split())
+
+
+@pytest.yield_fixture
+def create_disk():
+ # 640k should be enough for anybody
+ subp('dd if=/dev/zero of={} bs=1k count=640'.format(DISK_PATH).split())
+ yield
+ os.remove(DISK_PATH)
+
+
+ALIAS_USERDATA = """\
+#cloud-config
+device_aliases:
+ my_alias: /dev/sdb
+disk_setup:
+ my_alias:
+ table_type: mbr
+ layout: [50, 50]
+ overwrite: True
+fs_setup:
+- label: fs1
+ device: my_alias.1
+ filesystem: ext4
+- label: fs2
+ device: my_alias.2
+ filesystem: ext4
+mounts:
+- ["my_alias.1", "/mnt1"]
+- ["my_alias.2", "/mnt2"]
+"""
+
+
+@pytest.mark.user_data(ALIAS_USERDATA)
+@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk)
+@pytest.mark.ubuntu
+@pytest.mark.lxd_vm
+class TestDeviceAliases:
+ """Test devices aliases work on disk setup/mount"""
+
+ def test_device_alias(self, create_disk, client: IntegrationInstance):
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert (
+ "updated disk_setup device entry 'my_alias' to '/dev/sdb'"
+ ) in log
+ assert 'changed my_alias.1 => /dev/sdb1' in log
+ assert 'changed my_alias.2 => /dev/sdb2' in log
+ assert 'WARN' not in log
+ assert 'Traceback' not in log
+
+ lsblk = json.loads(client.execute('lsblk --json'))
+ sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0]
+ assert len(sdb['children']) == 2
+ assert sdb['children'][0]['name'] == 'sdb1'
+ assert sdb['children'][0]['mountpoint'] == '/mnt1'
+ assert sdb['children'][1]['name'] == 'sdb2'
+ assert sdb['children'][1]['mountpoint'] == '/mnt2'
+
+
+PARTPROBE_USERDATA = """\
+#cloud-config
+disk_setup:
+ /dev/sdb:
+ table_type: mbr
+ layout: [50, 50]
+ overwrite: True
+fs_setup:
+ - label: test
+ device: /dev/sdb1
+ filesystem: ext4
+ - label: test2
+ device: /dev/sdb2
+ filesystem: ext4
+mounts:
+- ["/dev/sdb1", "/mnt1"]
+- ["/dev/sdb2", "/mnt2"]
+"""
+
+UPDATED_PARTPROBE_USERDATA = """\
+#cloud-config
+disk_setup:
+ /dev/sdb:
+ table_type: mbr
+ layout: [100]
+ overwrite: True
+fs_setup:
+ - label: test3
+ device: /dev/sdb1
+ filesystem: ext4
+mounts:
+- ["/dev/sdb1", "/mnt3"]
+"""
+
+
+@pytest.mark.user_data(PARTPROBE_USERDATA)
+@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk)
+@pytest.mark.ubuntu
+@pytest.mark.lxd_vm
+class TestPartProbeAvailability:
+ """Test disk setup works with partprobe
+
+ Disk setup can run successfully on a mounted partition when
+ partprobe is being used.
+
+ lp-1920939
+ """
+
+ def _verify_first_disk_setup(self, client, log):
+ assert 'Traceback' not in log
+ assert 'WARN' not in log
+ lsblk = json.loads(client.execute('lsblk --json'))
+ sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0]
+ assert len(sdb['children']) == 2
+ assert sdb['children'][0]['name'] == 'sdb1'
+ assert sdb['children'][0]['mountpoint'] == '/mnt1'
+ assert sdb['children'][1]['name'] == 'sdb2'
+ assert sdb['children'][1]['mountpoint'] == '/mnt2'
+
+ # Not bionic or xenial because the LXD agent gets in the way of us
+ # changing the userdata
+ @pytest.mark.not_bionic
+ @pytest.mark.not_xenial
+ def test_disk_setup_when_mounted(
+ self, create_disk, client: IntegrationInstance
+ ):
+ """Test lp-1920939.
+
+ We insert an extra disk into our VM, format it to have two partitions,
+ modify our cloud config to mount devices before disk setup, and modify
+ our userdata to setup a single partition on the disk.
+
+ This allows cloud-init to attempt disk setup on a mounted partition.
+ When blockdev is in use, it will fail with
+ "blockdev: ioctl error on BLKRRPART: Device or resource busy" along
+ with a warning and a traceback. When partprobe is in use, everything
+ should work successfully.
+ """
+ log = client.read_from_file('/var/log/cloud-init.log')
+ self._verify_first_disk_setup(client, log)
+
+ # Update our userdata and cloud.cfg to mount then perform new disk
+ # setup
+ client.write_to_file(
+ '/var/lib/cloud/seed/nocloud-net/user-data',
+ UPDATED_PARTPROBE_USERDATA,
+ )
+ client.execute(
+ "sed -i 's/write-files/write-files\\n - mounts/' "
+ "/etc/cloud/cloud.cfg"
+ )
+
+ client.execute('cloud-init clean --logs')
+ client.restart()
+
+ # Assert new setup works as expected
+ assert 'Traceback' not in log
+ assert 'WARN' not in log
+
+ lsblk = json.loads(client.execute('lsblk --json'))
+ sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0]
+ assert len(sdb['children']) == 1
+ assert sdb['children'][0]['name'] == 'sdb1'
+ assert sdb['children'][0]['mountpoint'] == '/mnt3'
+
+ def test_disk_setup_no_partprobe(
+ self, create_disk, client: IntegrationInstance
+ ):
+ """Ensure disk setup still works as expected without partprobe."""
+ # We can't do this part in a bootcmd because the path has already
+ # been found by the time we get to the bootcmd
+ client.execute('rm $(which partprobe)')
+ client.execute('cloud-init clean --logs')
+ client.restart()
+
+ log = client.read_from_file('/var/log/cloud-init.log')
+ self._verify_first_disk_setup(client, log)
+
+ assert 'partprobe' not in log
diff --git a/tests/integration_tests/modules/test_hotplug.py b/tests/integration_tests/modules/test_hotplug.py
index b683566f..a42d1c8c 100644
--- a/tests/integration_tests/modules/test_hotplug.py
+++ b/tests/integration_tests/modules/test_hotplug.py
@@ -48,7 +48,7 @@ def test_hotplug_add_remove(client: IntegrationInstance):
# Add new NIC
added_ip = client.instance.add_network_interface()
- _wait_till_hotplug_complete(client)
+ _wait_till_hotplug_complete(client, expected_runs=2)
ips_after_add = _get_ip_addr(client)
new_addition = [ip for ip in ips_after_add if ip.ip4 == added_ip][0]
@@ -63,7 +63,7 @@ def test_hotplug_add_remove(client: IntegrationInstance):
# Remove new NIC
client.instance.remove_network_interface(added_ip)
- _wait_till_hotplug_complete(client, expected_runs=2)
+ _wait_till_hotplug_complete(client, expected_runs=4)
ips_after_remove = _get_ip_addr(client)
assert len(ips_after_remove) == len(ips_before)
assert added_ip not in [ip.ip4 for ip in ips_after_remove]
@@ -72,6 +72,10 @@ def test_hotplug_add_remove(client: IntegrationInstance):
config = yaml.safe_load(netplan_cfg)
assert new_addition.interface not in config['network']['ethernets']
+ assert 'enabled' == client.execute(
+ 'cloud-init devel hotplug-hook -s net query'
+ )
+
@pytest.mark.openstack
def test_no_hotplug_in_userdata(client: IntegrationInstance):
@@ -83,7 +87,7 @@ def test_no_hotplug_in_userdata(client: IntegrationInstance):
client.instance.add_network_interface()
_wait_till_hotplug_complete(client)
log = client.read_from_file('/var/log/cloud-init.log')
- assert 'hotplug not enabled for event of type network' in log
+ assert "Event Denied: scopes=['network'] EventType=hotplug" in log
ips_after_add = _get_ip_addr(client)
if len(ips_after_add) == len(ips_before) + 1:
@@ -92,3 +96,7 @@ def test_no_hotplug_in_userdata(client: IntegrationInstance):
assert new_ip.state == 'DOWN'
else:
assert len(ips_after_add) == len(ips_before)
+
+ assert 'disabled' == client.execute(
+ 'cloud-init devel hotplug-hook -s net query'
+ )
diff --git a/tests/integration_tests/modules/test_ntp_servers.py b/tests/integration_tests/modules/test_ntp_servers.py
index e72389c1..7a799139 100644
--- a/tests/integration_tests/modules/test_ntp_servers.py
+++ b/tests/integration_tests/modules/test_ntp_servers.py
@@ -1,15 +1,19 @@
-"""Integration test for the ntp module's ``servers`` functionality with ntp.
+"""Integration test for the ntp module's ntp functionality.
This test specifies the use of the `ntp` NTP client, and ensures that the given
NTP servers are configured as expected.
-(This is ported from ``tests/cloud_tests/testcases/modules/ntp_servers.yaml``.)
+(This is ported from ``tests/cloud_tests/testcases/modules/ntp_servers.yaml``,
+``tests/cloud_tests/testcases/modules/ntp_pools.yaml``,
+and ``tests/cloud_tests/testcases/modules/ntp_chrony.yaml``)
"""
import re
import yaml
import pytest
+from tests.integration_tests.instances import IntegrationInstance
+
USER_DATA = """\
#cloud-config
ntp:
@@ -17,21 +21,26 @@ ntp:
servers:
- 172.16.15.14
- 172.16.17.18
+ pools:
+ - 0.cloud-init.mypool
+ - 1.cloud-init.mypool
+ - 172.16.15.15
"""
EXPECTED_SERVERS = yaml.safe_load(USER_DATA)["ntp"]["servers"]
+EXPECTED_POOLS = yaml.safe_load(USER_DATA)["ntp"]["pools"]
@pytest.mark.ci
@pytest.mark.user_data(USER_DATA)
class TestNtpServers:
- def test_ntp_installed(self, class_client):
+ def test_ntp_installed(self, class_client: IntegrationInstance):
"""Test that `ntpd --version` succeeds, indicating installation."""
- result = class_client.execute("ntpd --version")
- assert 0 == result.return_code
+ assert class_client.execute("ntpd --version").ok
- def test_dist_config_file_is_empty(self, class_client):
+ def test_dist_config_file_is_empty(self,
+ class_client: IntegrationInstance):
"""Test that the distributed config file is empty.
(This test is skipped on all currently supported Ubuntu releases, so
@@ -42,7 +51,7 @@ class TestNtpServers:
dist_file = class_client.read_from_file("/etc/ntp.conf.dist")
assert 0 == len(dist_file.strip().splitlines())
- def test_ntp_entries(self, class_client):
+ def test_ntp_entries(self, class_client: IntegrationInstance):
ntp_conf = class_client.read_from_file("/etc/ntp.conf")
for expected_server in EXPECTED_SERVERS:
assert re.search(
@@ -50,9 +59,69 @@ class TestNtpServers:
ntp_conf,
re.MULTILINE
)
+ for expected_pool in EXPECTED_POOLS:
+ assert re.search(
+ r"^pool {} iburst".format(expected_pool),
+ ntp_conf,
+ re.MULTILINE
+ )
- def test_ntpq_servers(self, class_client):
+ def test_ntpq_servers(self, class_client: IntegrationInstance):
result = class_client.execute("ntpq -p -w -n")
assert result.ok
- for expected_server in EXPECTED_SERVERS:
- assert expected_server in result.stdout
+ for expected_server_or_pool in [*EXPECTED_SERVERS, *EXPECTED_POOLS]:
+ assert expected_server_or_pool in result.stdout
+
+
+CHRONY_DATA = """\
+#cloud-config
+ntp:
+ enabled: true
+ ntp_client: chrony
+"""
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(CHRONY_DATA)
+def test_chrony(client: IntegrationInstance):
+ if client.execute('test -f /etc/chrony.conf').ok:
+ chrony_conf = '/etc/chrony.conf'
+ else:
+ chrony_conf = '/etc/chrony/chrony.conf'
+ contents = client.read_from_file(chrony_conf)
+ assert '.pool.ntp.org' in contents
+
+
+TIMESYNCD_DATA = """\
+#cloud-config
+ntp:
+ enabled: true
+ ntp_client: systemd-timesyncd
+"""
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(TIMESYNCD_DATA)
+def test_timesyncd(client: IntegrationInstance):
+ contents = client.read_from_file(
+ '/etc/systemd/timesyncd.conf.d/cloud-init.conf'
+ )
+ assert '.pool.ntp.org' in contents
+
+
+EMPTY_NTP = """\
+#cloud-config
+ntp:
+ ntp_client: ntp
+ pools: []
+ servers: []
+"""
+
+
+@pytest.mark.user_data(EMPTY_NTP)
+def test_empty_ntp(client: IntegrationInstance):
+ assert client.execute('ntpd --version').ok
+ assert client.execute('test -f /etc/ntp.conf.dist').failed
+ assert 'pool.ntp.org iburst' in client.execute(
+ 'grep -v "^#" /etc/ntp.conf'
+ )
diff --git a/tests/integration_tests/modules/test_snap.py b/tests/integration_tests/modules/test_snap.py
index 481edbaa..652efa68 100644
--- a/tests/integration_tests/modules/test_snap.py
+++ b/tests/integration_tests/modules/test_snap.py
@@ -4,7 +4,7 @@ This test specifies a command to be executed by the ``snap`` module
and then checks that if that command was executed during boot.
(This is ported from
-``tests/cloud_tests/testcases/modules/runcmd.yaml``.)"""
+``tests/cloud_tests/testcases/modules/snap.yaml``.)"""
import pytest
diff --git a/tests/integration_tests/modules/test_ssh_import_id.py b/tests/integration_tests/modules/test_ssh_import_id.py
index 3db573b5..b90fe95f 100644
--- a/tests/integration_tests/modules/test_ssh_import_id.py
+++ b/tests/integration_tests/modules/test_ssh_import_id.py
@@ -12,6 +12,7 @@ TODO:
import pytest
+from tests.integration_tests.util import retry
USER_DATA = """\
#cloud-config
@@ -26,6 +27,11 @@ ssh_import_id:
class TestSshImportId:
@pytest.mark.user_data(USER_DATA)
+ # Retry is needed here because ssh import id is one of the last modules
+ # run, and it fires off a web request, then continues with the rest of
+ # cloud-init. It is possible cloud-init's status is "done" before the
+ # id's have been fully imported.
+ @retry(tries=30, delay=1)
def test_ssh_import_id(self, client):
ssh_output = client.read_from_file(
"/home/ubuntu/.ssh/authorized_keys")
diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py
index ce62ffc8..80430eab 100644
--- a/tests/integration_tests/util.py
+++ b/tests/integration_tests/util.py
@@ -1,3 +1,4 @@
+import functools
import logging
import multiprocessing
import os
@@ -64,3 +65,32 @@ def get_test_rsa_keypair(key_name: str = 'test1') -> key_pair:
with private_key_path.open() as private_file:
private_key = private_file.read()
return key_pair(public_key, private_key)
+
+
+def retry(*, tries: int = 30, delay: int = 1):
+ """Decorator for retries.
+
+ Retry a function until code no longer raises an exception or
+ max tries is reached.
+
+ Example:
+ @retry(tries=5, delay=1)
+ def try_something_that_may_not_be_ready():
+ ...
+ """
+ def _retry(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ last_error = None
+ for _ in range(tries):
+ try:
+ func(*args, **kwargs)
+ break
+ except Exception as e:
+ last_error = e
+ time.sleep(delay)
+ else:
+ if last_error:
+ raise last_error
+ return wrapper
+ return _retry
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index fdb4026c..a39e1d0c 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -225,7 +225,8 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
expected_doc_sections = [
'**Supported distros:** all',
('**Supported distros:** almalinux, alpine, centos, debian, '
- 'fedora, opensuse, photon, rhel, rocky, sles, ubuntu'),
+ 'eurolinux, fedora, opensuse, photon, rhel, rocky, sles, ubuntu, '
+ 'virtuozzo'),
'**Config schema**:\n **resize_rootfs:** (true/false/noblock)',
'**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n'
]
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 54e06119..03609c3d 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -635,15 +635,20 @@ scbus-1 on xpt0 bus 0
def _get_ds(self, data, agent_command=None, distro='ubuntu',
apply_network=None, instance_id=None):
- def dsdevs():
- return data.get('dsdevs', [])
-
def _wait_for_files(flist, _maxwait=None, _naplen=None):
data['waited'] = flist
return []
+ def _load_possible_azure_ds(seed_dir, cache_dir):
+ yield seed_dir
+ yield dsaz.DEFAULT_PROVISIONING_ISO_DEV
+ yield from data.get('dsdevs', [])
+ if cache_dir:
+ yield cache_dir
+
+ seed_dir = os.path.join(self.paths.seed_dir, "azure")
if data.get('ovfcontent') is not None:
- populate_dir(os.path.join(self.paths.seed_dir, "azure"),
+ populate_dir(seed_dir,
{'ovf-env.xml': data['ovfcontent']})
dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
@@ -654,6 +659,8 @@ scbus-1 on xpt0 bus 0
self.m_report_failure_to_fabric = mock.MagicMock(autospec=True)
self.m_ephemeral_dhcpv4 = mock.MagicMock()
self.m_ephemeral_dhcpv4_with_reporting = mock.MagicMock()
+ self.m_list_possible_azure_ds = mock.MagicMock(
+ side_effect=_load_possible_azure_ds)
if instance_id:
self.instance_id = instance_id
@@ -667,7 +674,8 @@ scbus-1 on xpt0 bus 0
return '7783-7084-3265-9085-8269-3286-77'
self.apply_patches([
- (dsaz, 'list_possible_azure_ds_devs', dsdevs),
+ (dsaz, 'list_possible_azure_ds',
+ self.m_list_possible_azure_ds),
(dsaz, 'perform_hostname_bounce', mock.MagicMock()),
(dsaz, 'get_hostname', mock.MagicMock()),
(dsaz, 'set_hostname', mock.MagicMock()),
@@ -844,9 +852,14 @@ scbus-1 on xpt0 bus 0
"""When a device path is used, present that in subplatform."""
data = {'sys_cfg': {}, 'dsdevs': ['/dev/cd0']}
dsrc = self._get_ds(data)
+ # DSAzure will attempt to mount /dev/sr0 first, which should
+ # fail with mount error since the list of devices doesn't have
+ # /dev/sr0
with mock.patch(MOCKPATH + 'util.mount_cb') as m_mount_cb:
- m_mount_cb.return_value = (
- {'local-hostname': 'me'}, 'ud', {'cfg': ''}, {})
+ m_mount_cb.side_effect = [
+ MountFailedError("fail"),
+ ({'local-hostname': 'me'}, 'ud', {'cfg': ''}, {})
+ ]
self.assertTrue(dsrc.get_data())
self.assertEqual(dsrc.userdata_raw, 'ud')
self.assertEqual(dsrc.metadata['local-hostname'], 'me')
@@ -1608,12 +1621,19 @@ scbus-1 on xpt0 bus 0
@mock.patch(MOCKPATH + 'util.is_FreeBSD')
@mock.patch(MOCKPATH + '_check_freebsd_cdrom')
- def test_list_possible_azure_ds_devs(self, m_check_fbsd_cdrom,
- m_is_FreeBSD):
+ def test_list_possible_azure_ds(self, m_check_fbsd_cdrom,
+ m_is_FreeBSD):
"""On FreeBSD, possible devs should show /dev/cd0."""
m_is_FreeBSD.return_value = True
m_check_fbsd_cdrom.return_value = True
- self.assertEqual(dsaz.list_possible_azure_ds_devs(), ['/dev/cd0'])
+ possible_ds = []
+ for src in dsaz.list_possible_azure_ds(
+ "seed_dir", "cache_dir"):
+ possible_ds.append(src)
+ self.assertEqual(possible_ds, ["seed_dir",
+ dsaz.DEFAULT_PROVISIONING_ISO_DEV,
+ "/dev/cd0",
+ "cache_dir"])
self.assertEqual(
[mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list)
@@ -1967,11 +1987,19 @@ class TestAzureBounce(CiTestCase):
with_logs = True
def mock_out_azure_moving_parts(self):
+
+ def _load_possible_azure_ds(seed_dir, cache_dir):
+ yield seed_dir
+ yield dsaz.DEFAULT_PROVISIONING_ISO_DEV
+ if cache_dir:
+ yield cache_dir
+
self.patches.enter_context(
mock.patch.object(dsaz.util, 'wait_for_files'))
self.patches.enter_context(
- mock.patch.object(dsaz, 'list_possible_azure_ds_devs',
- mock.MagicMock(return_value=[])))
+ mock.patch.object(
+ dsaz, 'list_possible_azure_ds',
+ mock.MagicMock(side_effect=_load_possible_azure_ds)))
self.patches.enter_context(
mock.patch.object(dsaz, 'get_metadata_from_fabric',
mock.MagicMock(return_value={})))
@@ -2797,7 +2825,8 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
@mock.patch(MOCKPATH + 'EphemeralDHCPv4')
def test_check_if_nic_is_primary_retries_on_failures(
self, m_dhcpv4, m_imds):
- """Retry polling for network metadata on all failures except timeout"""
+ """Retry polling for network metadata on all failures except timeout
+ and network unreachable errors"""
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
lease = {
'interface': 'eth9', 'fixed-address': '192.168.2.9',
@@ -2826,8 +2855,13 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
error = url_helper.UrlError(cause=cause, code=410)
eth0Retries.append(exc_cb("No goal state.", error))
else:
- cause = requests.Timeout('Fake connection timeout')
for _ in range(0, 10):
+ # We are expected to retry for a certain period for both
+ # timeout errors and network unreachable errors.
+ if _ < 5:
+ cause = requests.Timeout('Fake connection timeout')
+ else:
+ cause = requests.ConnectionError('Network Unreachable')
error = url_helper.UrlError(cause=cause)
eth1Retries.append(exc_cb("Connection timeout", error))
# Should stop retrying after 10 retries
@@ -2873,6 +2907,25 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
dsa.wait_for_link_up("eth0")
self.assertEqual(1, m_is_link_up.call_count)
+ @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
+ @mock.patch(MOCKPATH + 'util.write_file')
+ @mock.patch('cloudinit.net.read_sys_net')
+ @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
+ def test_wait_for_link_up_checks_link_after_sleep(
+ self, m_is_link_up, m_read_sys_net, m_writefile, m_is_up):
+ """Waiting for link to be up should return immediately if the link is
+ already up."""
+
+ distro_cls = distros.fetch('ubuntu')
+ distro = distro_cls('ubuntu', {}, self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+ m_is_link_up.return_value = False
+ m_is_up.return_value = True
+
+ dsa.wait_for_link_up("eth0")
+ self.assertEqual(2, m_is_link_up.call_count)
+ self.assertEqual(1, m_is_up.call_count)
+
@mock.patch(MOCKPATH + 'util.write_file')
@mock.patch('cloudinit.net.read_sys_net')
@mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py
index 5e9c547a..00f0a78c 100644
--- a/tests/unittests/test_datasource/test_common.py
+++ b/tests/unittests/test_datasource/test_common.py
@@ -29,6 +29,7 @@ from cloudinit.sources import (
DataSourceSmartOS as SmartOS,
DataSourceUpCloud as UpCloud,
DataSourceVultr as Vultr,
+ DataSourceVMware as VMware,
)
from cloudinit.sources import DataSourceNone as DSNone
@@ -52,6 +53,7 @@ DEFAULT_LOCAL = [
RbxCloud.DataSourceRbxCloud,
Scaleway.DataSourceScaleway,
UpCloud.DataSourceUpCloudLocal,
+ VMware.DataSourceVMware,
]
DEFAULT_NETWORK = [
@@ -68,6 +70,7 @@ DEFAULT_NETWORK = [
OpenStack.DataSourceOpenStack,
OVF.DataSourceOVFNet,
UpCloud.DataSourceUpCloud,
+ VMware.DataSourceVMware,
]
diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py
index e2718077..9f52b504 100644
--- a/tests/unittests/test_datasource/test_ovf.py
+++ b/tests/unittests/test_datasource/test_ovf.py
@@ -83,6 +83,103 @@ class TestReadOvfEnv(CiTestCase):
self.assertEqual({'password': "passw0rd"}, cfg)
self.assertIsNone(ud)
+ def test_with_b64_network_config_enable_read_network(self):
+ network_config = dedent("""\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - eng.vmware.com
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """)
+ network_config_b64 = base64.b64encode(network_config.encode()).decode()
+ props = {"network-config": network_config_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001"}
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env, True)
+ self.assertEqual("inst-001", md["instance-id"])
+ self.assertEqual({'password': "passw0rd"}, cfg)
+ self.assertEqual(
+ {'version': 2, 'ethernets':
+ {'nics':
+ {'nameservers':
+ {'addresses': ['127.0.0.53'],
+ 'search': ['eng.vmware.com', 'vmware.com']},
+ 'match': {'name': 'eth*'},
+ 'gateway4': '10.10.10.253',
+ 'dhcp4': False,
+ 'addresses': ['10.10.10.1/24']}}},
+ md["network-config"])
+ self.assertIsNone(ud)
+
+ def test_with_non_b64_network_config_enable_read_network(self):
+ network_config = dedent("""\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - eng.vmware.com
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """)
+ props = {"network-config": network_config,
+ "password": "passw0rd",
+ "instance-id": "inst-001"}
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env, True)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual({'password': "passw0rd"}, cfg)
+ self.assertIsNone(ud)
+
+ def test_with_b64_network_config_disable_read_network(self):
+ network_config = dedent("""\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - eng.vmware.com
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """)
+ network_config_b64 = base64.b64encode(network_config.encode()).decode()
+ props = {"network-config": network_config_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001"}
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual({'password': "passw0rd"}, cfg)
+ self.assertIsNone(ud)
+
class TestMarkerFiles(CiTestCase):
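The new OVF tests above round-trip a netplan v2 document through the base64-encoded "network-config" property. A minimal sketch of preparing such a property, using only the standard library (values mirror the fixtures above and are illustrative):

import base64

# A netplan v2 document, mirroring the fixture in the tests above.
network_config = """network:
  version: 2
  ethernets:
    nics:
      match:
        name: eth*
      dhcp4: false
      addresses:
        - 10.10.10.1/24
"""

# Base64-encode for the "network-config" OVF property; per the tests above,
# read_ovf_environment only decodes it when reading network config is enabled.
encoded = base64.b64encode(network_config.encode()).decode()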
diff --git a/tests/unittests/test_datasource/test_vmware.py b/tests/unittests/test_datasource/test_vmware.py
new file mode 100644
index 00000000..597db7c8
--- /dev/null
+++ b/tests/unittests/test_datasource/test_vmware.py
@@ -0,0 +1,377 @@
+# Copyright (c) 2021 VMware, Inc. All Rights Reserved.
+#
+# Authors: Andrew Kutz <akutz@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import base64
+import gzip
+from cloudinit import dmi, helpers, safeyaml
+from cloudinit import settings
+from cloudinit.sources import DataSourceVMware
+from cloudinit.tests.helpers import (
+ mock,
+ CiTestCase,
+ FilesystemMockingTestCase,
+ populate_dir,
+)
+
+import os
+
+PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name"
+PRODUCT_NAME = "VMware7,1"
+PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB"
+REROOT_FILES = {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ PRODUCT_NAME_FILE_PATH: PRODUCT_NAME,
+}
+
+VMW_MULTIPLE_KEYS = [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@vmw.com",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@vmw.com",
+]
+VMW_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@vmw.com"
+
+VMW_METADATA_YAML = """instance-id: cloud-vm
+local-hostname: cloud-vm
+network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+"""
+
+VMW_USERDATA_YAML = """## template: jinja
+#cloud-config
+users:
+- default
+"""
+
+VMW_VENDORDATA_YAML = """## template: jinja
+#cloud-config
+runcmd:
+- echo "Hello, world."
+"""
+
+
+class TestDataSourceVMware(CiTestCase):
+ """
+ Test common functionality that is not transport specific.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMware, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_no_data_access_method(self):
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = None
+ ret = ds.get_data()
+ self.assertFalse(ret)
+
+ def test_get_host_info(self):
+ host_info = DataSourceVMware.get_host_info()
+ self.assertTrue(host_info)
+ self.assertTrue(host_info["hostname"])
+ self.assertTrue(host_info["local-hostname"])
+ self.assertTrue(host_info["local_hostname"])
+ self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4])
+
+
+class TestDataSourceVMwareEnvVars(FilesystemMockingTestCase):
+ """
+ Test the envvar transport.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMwareEnvVars, self).setUp()
+ self.tmp = self.tmp_dir()
+ os.environ[DataSourceVMware.VMX_GUESTINFO] = "1"
+ self.create_system_files()
+
+ def tearDown(self):
+ del os.environ[DataSourceVMware.VMX_GUESTINFO]
+ return super(TestDataSourceVMwareEnvVars, self).tearDown()
+
+ def create_system_files(self):
+ rootd = self.tmp_dir()
+ populate_dir(
+ rootd,
+ {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ },
+ )
+ self.assertTrue(self.reRoot(rootd))
+
+ def assert_get_data_ok(self, m_fn, m_fn_call_count=6):
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = None
+ ret = ds.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(m_fn_call_count, m_fn.call_count)
+ self.assertEqual(
+ ds.data_access_method, DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR
+ )
+ return ds
+
+ def assert_metadata(self, metadata, m_fn, m_fn_call_count=6):
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count)
+ assert_metadata(self, ds, metadata)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_subplatform(self, m_fn):
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+ self.assertEqual(
+ ds.subplatform,
+ "%s (%s)"
+ % (
+ DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR,
+ DataSourceVMware.get_guestinfo_envvar_key_name("metadata"),
+ ),
+ )
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_only(self, m_fn):
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_userdata_only(self, m_fn):
+ m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_vendordata_only(self, m_fn):
+ m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_base64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_b64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_gzip_base64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gzip+base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_gz_b64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gz+b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_metadata_single_ssh_key(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_SINGLE_KEY
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_metadata_multiple_ssh_keys(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_MULTIPLE_KEYS
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+
+class TestDataSourceVMwareGuestInfo(FilesystemMockingTestCase):
+ """
+ Test the guestinfo transport on a VMware platform.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMwareGuestInfo, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.create_system_files()
+
+ def create_system_files(self):
+ rootd = self.tmp_dir()
+ populate_dir(
+ rootd,
+ {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ PRODUCT_NAME_FILE_PATH: PRODUCT_NAME,
+ },
+ )
+ self.assertTrue(self.reRoot(rootd))
+
+ def assert_get_data_ok(self, m_fn, m_fn_call_count=6):
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = "vmware-rpctool"
+ ret = ds.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(m_fn_call_count, m_fn.call_count)
+ self.assertEqual(
+ ds.data_access_method,
+ DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO,
+ )
+ return ds
+
+ def assert_metadata(self, metadata, m_fn, m_fn_call_count=6):
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count)
+ assert_metadata(self, ds, metadata)
+
+ def test_ds_valid_on_vmware_platform(self):
+ system_type = dmi.read_dmi_data("system-product-name")
+ self.assertEqual(system_type, PRODUCT_NAME)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_subplatform(self, m_fn):
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+ self.assertEqual(
+ ds.subplatform,
+ "%s (%s)"
+ % (
+ DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO,
+ DataSourceVMware.get_guestinfo_key_name("metadata"),
+ ),
+ )
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_userdata_only(self, m_fn):
+ m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_vendordata_only(self, m_fn):
+ m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_metadata_single_ssh_key(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_SINGLE_KEY
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_metadata_multiple_ssh_keys(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_MULTIPLE_KEYS
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_base64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_b64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_gzip_base64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gzip+base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_gz_b64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gz+b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+
+class TestDataSourceVMwareGuestInfo_InvalidPlatform(FilesystemMockingTestCase):
+ """
+ Test the guestinfo transport on a non-VMware platform.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMwareGuestInfo_InvalidPlatform, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.create_system_files()
+
+ def create_system_files(self):
+ rootd = self.tmp_dir()
+ populate_dir(
+ rootd,
+ {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ },
+ )
+ self.assertTrue(self.reRoot(rootd))
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_ds_invalid_on_non_vmware_platform(self, m_fn):
+ system_type = dmi.read_dmi_data("system-product-name")
+ self.assertIsNone(system_type)
+
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = "vmware-rpctool"
+ ret = ds.get_data()
+ self.assertFalse(ret)
+
+
+def assert_metadata(test_obj, ds, metadata):
+ test_obj.assertEqual(metadata.get("instance-id"), ds.get_instance_id())
+ test_obj.assertEqual(metadata.get("local-hostname"), ds.get_hostname())
+
+ expected_public_keys = metadata.get("public_keys")
+ if not isinstance(expected_public_keys, list):
+ expected_public_keys = [expected_public_keys]
+
+ test_obj.assertEqual(expected_public_keys, ds.get_public_ssh_keys())
+ test_obj.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+
+def get_ds(temp_dir):
+ ds = DataSourceVMware.DataSourceVMware(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": temp_dir})
+ )
+ ds.vmware_rpctool = "vmware-rpctool"
+ return ds
+
+
+# vi: ts=4 expandtab
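A rough sketch of the encodings these guestinfo tests exercise: metadata may be plain, "base64"/"b64", or "gzip+base64"/"gz+b64", with the encoding name carried in a companion key. Assuming only the standard library:

import base64
import gzip

metadata = "instance-id: cloud-vm\nlocal-hostname: cloud-vm\n"

# "gzip+base64"/"gz+b64": compress first, then base64-encode, matching
# the (data, "gzip+base64") side_effect pairs in the tests above.
payload = base64.b64encode(gzip.compress(metadata.encode("utf-8"))).decode()
# guestinfo.metadata          = payload
# guestinfo.metadata.encoding = "gzip+base64"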
diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py
index 94ab052d..021866b7 100644
--- a/tests/unittests/test_distros/test_create_users.py
+++ b/tests/unittests/test_distros/test_create_users.py
@@ -23,7 +23,7 @@ class MyBaseDistro(distros.Distro):
def _write_network(self, settings):
raise NotImplementedError()
- def package_command(self, cmd, args=None, pkgs=None):
+ def package_command(self, command, args=None, pkgs=None):
raise NotImplementedError()
def update_package_sources(self):
diff --git a/tests/unittests/test_distros/test_photon.py b/tests/unittests/test_distros/test_photon.py
new file mode 100644
index 00000000..1c3145ca
--- /dev/null
+++ b/tests/unittests/test_distros/test_photon.py
@@ -0,0 +1,68 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from . import _get_distro
+from cloudinit import util
+from cloudinit.tests.helpers import mock
+from cloudinit.tests.helpers import CiTestCase
+
+SYSTEM_INFO = {
+ 'paths': {
+ 'cloud_dir': '/var/lib/cloud/',
+ 'templates_dir': '/etc/cloud/templates/',
+ },
+ 'network': {'renderers': 'networkd'},
+}
+
+
+class TestPhoton(CiTestCase):
+ with_logs = True
+ distro = _get_distro('photon', SYSTEM_INFO)
+ expected_log_line = 'Rely on PhotonOS default network config'
+
+ def test_network_renderer(self):
+ self.assertEqual(self.distro._cfg['network']['renderers'], 'networkd')
+
+ def test_get_distro(self):
+ self.assertEqual(self.distro.osfamily, 'photon')
+
+ @mock.patch("cloudinit.distros.photon.subp.subp")
+ def test_write_hostname(self, m_subp):
+ hostname = 'myhostname'
+ hostfile = self.tmp_path('previous-hostname')
+ self.distro._write_hostname(hostname, hostfile)
+ self.assertEqual(hostname, util.load_file(hostfile))
+
+ ret = self.distro._read_hostname(hostfile)
+ self.assertEqual(ret, hostname)
+
+ m_subp.return_value = (None, None)
+ hostfile += 'hostfile'
+ self.distro._write_hostname(hostname, hostfile)
+
+ m_subp.return_value = (hostname, None)
+ ret = self.distro._read_hostname(hostfile)
+ self.assertEqual(ret, hostname)
+
+ self.logs.truncate(0)
+ m_subp.return_value = (None, 'bla')
+ self.distro._write_hostname(hostname, None)
+ self.assertIn('Error while setting hostname', self.logs.getvalue())
+
+ @mock.patch('cloudinit.net.generate_fallback_config')
+ def test_fallback_netcfg(self, m_fallback_cfg):
+
+ key = 'disable_fallback_netcfg'
+ # Don't use fallback if no setting given
+ self.logs.truncate(0)
+ assert self.distro.generate_fallback_config() is None
+ self.assertIn(self.expected_log_line, self.logs.getvalue())
+
+ self.logs.truncate(0)
+ self.distro._cfg[key] = True
+ assert self.distro.generate_fallback_config() is None
+ self.assertIn(self.expected_log_line, self.logs.getvalue())
+
+ self.logs.truncate(0)
+ self.distro._cfg[key] = False
+ assert self.distro.generate_fallback_config() is not None
+ self.assertNotIn(self.expected_log_line, self.logs.getvalue())
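The fallback test above hinges on a single distro config key. A minimal sketch of the gating it asserts (the helper name here is illustrative, not part of the patch):

SYSTEM_INFO = {'network': {'renderers': 'networkd'}}

def uses_cloudinit_fallback(cfg):
    # Mirrors the assertions above: a fallback network config is only
    # generated when disable_fallback_netcfg is explicitly False; unset
    # or True keeps the PhotonOS default network configuration.
    return cfg.get('disable_fallback_netcfg', True) is False

assert not uses_cloudinit_fallback(SYSTEM_INFO)
assert uses_cloudinit_fallback(dict(SYSTEM_INFO, disable_fallback_netcfg=False))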
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 1d8aaf18..8617d7bd 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -649,6 +649,50 @@ class TestDsIdentify(DsIdentifyBase):
"""EC2: bobrightbox.com in product_serial is not brightbox'"""
self._test_ds_not_found('Ec2-E24Cloud-negative')
+ def test_vmware_no_valid_transports(self):
+ """VMware: no valid transports"""
+ self._test_ds_not_found('VMware-NoValidTransports')
+
+ def test_vmware_envvar_no_data(self):
+ """VMware: envvar transport no data"""
+ self._test_ds_not_found('VMware-EnvVar-NoData')
+
+ def test_vmware_envvar_no_virt_id(self):
+ """VMware: envvar transport success if no virt id"""
+ self._test_ds_found('VMware-EnvVar-NoVirtID')
+
+ def test_vmware_envvar_activated_by_metadata(self):
+ """VMware: envvar transport activated by metadata"""
+ self._test_ds_found('VMware-EnvVar-Metadata')
+
+ def test_vmware_envvar_activated_by_userdata(self):
+ """VMware: envvar transport activated by userdata"""
+ self._test_ds_found('VMware-EnvVar-Userdata')
+
+ def test_vmware_envvar_activated_by_vendordata(self):
+ """VMware: envvar transport activated by vendordata"""
+ self._test_ds_found('VMware-EnvVar-Vendordata')
+
+ def test_vmware_guestinfo_no_data(self):
+ """VMware: guestinfo transport no data"""
+ self._test_ds_not_found('VMware-GuestInfo-NoData')
+
+ def test_vmware_guestinfo_no_virt_id(self):
+ """VMware: guestinfo transport fails if no virt id"""
+ self._test_ds_not_found('VMware-GuestInfo-NoVirtID')
+
+ def test_vmware_guestinfo_activated_by_metadata(self):
+ """VMware: guestinfo transport activated by metadata"""
+ self._test_ds_found('VMware-GuestInfo-Metadata')
+
+ def test_vmware_guestinfo_activated_by_userdata(self):
+ """VMware: guestinfo transport activated by userdata"""
+ self._test_ds_found('VMware-GuestInfo-Userdata')
+
+ def test_vmware_guestinfo_activated_by_vendordata(self):
+ """VMware: guestinfo transport activated by vendordata"""
+ self._test_ds_found('VMware-GuestInfo-Vendordata')
+
class TestBSDNoSys(DsIdentifyBase):
"""Test *BSD code paths
@@ -1136,7 +1180,240 @@ VALID_CFG = {
'Ec2-E24Cloud-negative': {
'ds': 'Ec2',
'files': {P_SYS_VENDOR: 'e24cloudyday\n'},
- }
+ },
+ 'VMware-NoValidTransports': {
+ 'ds': 'VMware',
+ 'mocks': [
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ 'VMware-EnvVar-NoData': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo',
+ 'ret': 0,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata',
+ 'ret': 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ 'VMware-EnvVar-NoVirtID': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo',
+ 'ret': 0,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata',
+ 'ret': 0,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata',
+ 'ret': 1,
+ },
+ ],
+ },
+ 'VMware-EnvVar-Metadata': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo',
+ 'ret': 0,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata',
+ 'ret': 0,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata',
+ 'ret': 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ 'VMware-EnvVar-Userdata': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo',
+ 'ret': 0,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata',
+ 'ret': 0,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata',
+ 'ret': 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ 'VMware-EnvVar-Vendordata': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo',
+ 'ret': 0,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata',
+ 'ret': 0,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ 'VMware-GuestInfo-NoData': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_rpctool',
+ 'ret': 0,
+ 'out': '/usr/bin/vmware-rpctool',
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_metadata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_userdata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_vendordata',
+ 'ret': 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ 'VMware-GuestInfo-NoVirtID': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_rpctool',
+ 'ret': 0,
+ 'out': '/usr/bin/vmware-rpctool',
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_metadata',
+ 'ret': 0,
+ 'out': '---',
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_userdata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_vendordata',
+ 'ret': 1,
+ },
+ ],
+ },
+ 'VMware-GuestInfo-Metadata': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_rpctool',
+ 'ret': 0,
+ 'out': '/usr/bin/vmware-rpctool',
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_metadata',
+ 'ret': 0,
+ 'out': '---',
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_userdata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_vendordata',
+ 'ret': 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ 'VMware-GuestInfo-Userdata': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_rpctool',
+ 'ret': 0,
+ 'out': '/usr/bin/vmware-rpctool',
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_metadata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_userdata',
+ 'ret': 0,
+ 'out': '---',
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_vendordata',
+ 'ret': 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ 'VMware-GuestInfo-Vendordata': {
+ 'ds': 'VMware',
+ 'mocks': [
+ {
+ 'name': 'vmware_has_rpctool',
+ 'ret': 0,
+ 'out': '/usr/bin/vmware-rpctool',
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_metadata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_userdata',
+ 'ret': 1,
+ },
+ {
+ 'name': 'vmware_rpctool_guestinfo_vendordata',
+ 'ret': 0,
+ 'out': '---',
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
}
# vi: ts=4 expandtab
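For reference, the envvar cases above correspond to environment variables set inside the guest. A hedged sketch of priming them; the variable names follow the get_guestinfo_envvar_key_name convention seen in the unit tests earlier, so treat the exact spellings as assumptions:

import os

# Presence of VMX_GUESTINFO marks the envvar transport as available.
os.environ['VMX_GUESTINFO'] = '1'
# Per-key data (and an optional encoding) live in derived variables.
os.environ['VMX_GUESTINFO_METADATA'] = 'instance-id: cloud-vm\n'
os.environ['VMX_GUESTINFO_METADATA_ENCODING'] = ''  # plain text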
diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py
index e87069f6..69e8b30d 100644
--- a/tests/unittests/test_handler/test_handler_mounts.py
+++ b/tests/unittests/test_handler/test_handler_mounts.py
@@ -133,6 +133,15 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
disk_path,
cc_mounts.sanitize_devname(disk_path, None, mock.Mock()))
+ def test_device_aliases_remapping(self):
+ disk_path = '/dev/sda'
+ self.mock_existence_of_disk(disk_path)
+ self.assertEqual(disk_path,
+ cc_mounts.sanitize_devname('mydata',
+ lambda x: None,
+ mock.Mock(),
+ {'mydata': disk_path}))
+
class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase):
diff --git a/tests/unittests/test_handler/test_handler_puppet.py b/tests/unittests/test_handler/test_handler_puppet.py
index 62388ac6..b7891ab4 100644
--- a/tests/unittests/test_handler/test_handler_puppet.py
+++ b/tests/unittests/test_handler/test_handler_puppet.py
@@ -3,7 +3,7 @@
from cloudinit.config import cc_puppet
from cloudinit.sources import DataSourceNone
from cloudinit import (distros, helpers, cloud, util)
-from cloudinit.tests.helpers import CiTestCase, mock
+from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase, mock
import logging
import textwrap
@@ -63,7 +63,8 @@ class TestPuppetHandle(CiTestCase):
super(TestPuppetHandle, self).setUp()
self.new_root = self.tmp_dir()
self.conf = self.tmp_path('puppet.conf')
- self.csr_attributes_path = self.tmp_path('csr_attributes.yaml')
+ self.csr_attributes_path = self.tmp_path(
+ 'csr_attributes.yaml')
def _get_cloud(self, distro):
paths = helpers.Paths({'templates_dir': self.new_root})
@@ -72,7 +73,7 @@ class TestPuppetHandle(CiTestCase):
myds = DataSourceNone.DataSourceNone({}, mydist, paths)
return cloud.Cloud(myds, paths, {}, mydist, None)
- def test_handler_skips_missing_puppet_key_in_cloudconfig(self, m_auto):
+ def test_skips_missing_puppet_key_in_cloudconfig(self, m_auto):
"""Cloud-config containing no 'puppet' key is skipped."""
mycloud = self._get_cloud('ubuntu')
cfg = {}
@@ -81,19 +82,19 @@ class TestPuppetHandle(CiTestCase):
"no 'puppet' configuration found", self.logs.getvalue())
self.assertEqual(0, m_auto.call_count)
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_config_starts_puppet_service(self, m_subp, m_auto):
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_starts_puppet_service(self, m_subp, m_auto):
"""Cloud-config 'puppet' configuration starts puppet."""
mycloud = self._get_cloud('ubuntu')
cfg = {'puppet': {'install': False}}
cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
self.assertEqual(1, m_auto.call_count)
- self.assertEqual(
+ self.assertIn(
[mock.call(['service', 'puppet', 'start'], capture=False)],
m_subp.call_args_list)
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_empty_puppet_config_installs_puppet(self, m_subp, m_auto):
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_empty_puppet_config_installs_puppet(self, m_subp, m_auto):
"""Cloud-config empty 'puppet' configuration installs latest puppet."""
mycloud = self._get_cloud('ubuntu')
mycloud.distro = mock.MagicMock()
@@ -103,8 +104,8 @@ class TestPuppetHandle(CiTestCase):
[mock.call(('puppet', None))],
mycloud.distro.install_packages.call_args_list)
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_config_installs_puppet_on_true(self, m_subp, _):
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_installs_puppet_on_true(self, m_subp, _):
"""Cloud-config with 'puppet' key installs when 'install' is True."""
mycloud = self._get_cloud('ubuntu')
mycloud.distro = mock.MagicMock()
@@ -114,8 +115,85 @@ class TestPuppetHandle(CiTestCase):
[mock.call(('puppet', None))],
mycloud.distro.install_packages.call_args_list)
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_config_installs_puppet_version(self, m_subp, _):
+ @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio(self, m_subp, m_aio, _):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio'."""
+ mycloud = self._get_cloud('ubuntu')
+ mycloud.distro = mock.MagicMock()
+ cfg = {'puppet': {'install': True, 'install_type': 'aio'}}
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ m_aio.assert_called_with(
+ cc_puppet.AIO_INSTALL_URL,
+ None, None, True)
+
+ @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio_with_version(self,
+ m_subp, m_aio, _):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio' and 'version' is specified."""
+ mycloud = self._get_cloud('ubuntu')
+ mycloud.distro = mock.MagicMock()
+ cfg = {'puppet': {'install': True,
+ 'version': '6.24.0', 'install_type': 'aio'}}
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ m_aio.assert_called_with(
+ cc_puppet.AIO_INSTALL_URL,
+ '6.24.0', None, True)
+
+ @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio_with_collection(self,
+ m_subp,
+ m_aio, _):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio' and 'collection' is specified."""
+ mycloud = self._get_cloud('ubuntu')
+ mycloud.distro = mock.MagicMock()
+ cfg = {'puppet': {'install': True,
+ 'collection': 'puppet6', 'install_type': 'aio'}}
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ m_aio.assert_called_with(
+ cc_puppet.AIO_INSTALL_URL,
+ None, 'puppet6', True)
+
+ @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio_with_custom_url(self,
+ m_subp,
+ m_aio, _):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio' and 'aio_install_url' is specified."""
+ mycloud = self._get_cloud('ubuntu')
+ mycloud.distro = mock.MagicMock()
+ cfg = {'puppet':
+ {'install': True,
+ 'aio_install_url': 'http://test.url/path/to/script.sh',
+ 'install_type': 'aio'}}
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ m_aio.assert_called_with(
+ 'http://test.url/path/to/script.sh', None, None, True)
+
+ @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio_without_cleanup(self,
+ m_subp,
+ m_aio, _):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio' and no cleanup."""
+ mycloud = self._get_cloud('ubuntu')
+ mycloud.distro = mock.MagicMock()
+ cfg = {'puppet': {'install': True,
+ 'cleanup': False, 'install_type': 'aio'}}
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ m_aio.assert_called_with(
+ cc_puppet.AIO_INSTALL_URL,
+ None, None, False)
+
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_installs_puppet_version(self, m_subp, _):
"""Cloud-config 'puppet' configuration can specify a version."""
mycloud = self._get_cloud('ubuntu')
mycloud.distro = mock.MagicMock()
@@ -125,26 +203,39 @@ class TestPuppetHandle(CiTestCase):
[mock.call(('puppet', '3.8'))],
mycloud.distro.install_packages.call_args_list)
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_config_updates_puppet_conf(self, m_subp, m_auto):
+ @mock.patch('cloudinit.config.cc_puppet.get_config_value')
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_config_updates_puppet_conf(self,
+ m_subp, m_default, m_auto):
"""When 'conf' is provided update values in PUPPET_CONF_PATH."""
+
+ def _fake_get_config_value(puppet_bin, setting):
+ return self.conf
+
+ m_default.side_effect = _fake_get_config_value
mycloud = self._get_cloud('ubuntu')
cfg = {
'puppet': {
- 'conf': {'agent': {'server': 'puppetmaster.example.org'}}}}
- util.write_file(self.conf, '[agent]\nserver = origpuppet\nother = 3')
- puppet_conf_path = 'cloudinit.config.cc_puppet.PUPPET_CONF_PATH'
+ 'conf': {'agent': {'server': 'puppetserver.example.org'}}}}
+ util.write_file(
+ self.conf, '[agent]\nserver = origpuppet\nother = 3')
mycloud.distro = mock.MagicMock()
- with mock.patch(puppet_conf_path, self.conf):
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
content = util.load_file(self.conf)
- expected = '[agent]\nserver = puppetmaster.example.org\nother = 3\n\n'
+ expected = '[agent]\nserver = puppetserver.example.org\nother = 3\n\n'
self.assertEqual(expected, content)
+ @mock.patch('cloudinit.config.cc_puppet.get_config_value')
@mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_writes_csr_attributes_file(self, m_subp, m_auto):
+ def test_puppet_writes_csr_attributes_file(self,
+ m_subp, m_default, m_auto):
"""When csr_attributes is provided
creates file in PUPPET_CSR_ATTRIBUTES_PATH."""
+
+ def _fake_get_config_value(puppet_bin, setting):
+ return self.csr_attributes_path
+
+ m_default.side_effect = _fake_get_config_value
mycloud = self._get_cloud('ubuntu')
mycloud.distro = mock.MagicMock()
cfg = {
@@ -163,10 +254,7 @@ class TestPuppetHandle(CiTestCase):
}
}
}
- csr_attributes = 'cloudinit.config.cc_puppet.' \
- 'PUPPET_CSR_ATTRIBUTES_PATH'
- with mock.patch(csr_attributes, self.csr_attributes_path):
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
content = util.load_file(self.csr_attributes_path)
expected = textwrap.dedent("""\
custom_attributes:
@@ -177,3 +265,101 @@ class TestPuppetHandle(CiTestCase):
pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E
""")
self.assertEqual(expected, content)
+
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_runs_puppet_if_requested(self, m_subp, m_auto):
+ """Run puppet with default args if 'exec' is set to True."""
+ mycloud = self._get_cloud('ubuntu')
+ cfg = {'puppet': {'exec': True}}
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ self.assertEqual(1, m_auto.call_count)
+ self.assertIn(
+ [mock.call(['puppet', 'agent', '--test'], capture=False)],
+ m_subp.call_args_list)
+
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_runs_puppet_with_args_list_if_requested(self,
+ m_subp, m_auto):
+ """Run puppet with 'exec_args' list if 'exec' is set to True."""
+ mycloud = self._get_cloud('ubuntu')
+ cfg = {'puppet': {'exec': True, 'exec_args': [
+ '--onetime', '--detailed-exitcodes']}}
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ self.assertEqual(1, m_auto.call_count)
+ self.assertIn(
+ [mock.call(
+ ['puppet', 'agent', '--onetime', '--detailed-exitcodes'],
+ capture=False)],
+ m_subp.call_args_list)
+
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_runs_puppet_with_args_string_if_requested(self,
+ m_subp, m_auto):
+ """Run puppet with 'exec_args' string if 'exec' is set to True."""
+ mycloud = self._get_cloud('ubuntu')
+ cfg = {'puppet': {'exec': True,
+ 'exec_args': '--onetime --detailed-exitcodes'}}
+ cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ self.assertEqual(1, m_auto.call_count)
+ self.assertIn(
+ [mock.call(
+ ['puppet', 'agent', '--onetime', '--detailed-exitcodes'],
+ capture=False)],
+ m_subp.call_args_list)
+
+
+URL_MOCK = mock.Mock()
+URL_MOCK.contents = b'#!/bin/bash\necho "Hi Mom"'
+
+
+@mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=(None, None))
+@mock.patch(
+ 'cloudinit.config.cc_puppet.url_helper.readurl',
+ return_value=URL_MOCK, autospec=True,
+)
+class TestInstallPuppetAio(HttprettyTestCase):
+ def test_install_with_default_arguments(self, m_readurl, m_subp):
+ """Install AIO with no arguments"""
+ cc_puppet.install_puppet_aio()
+
+ self.assertEqual(
+ [mock.call([mock.ANY, '--cleanup'], capture=False)],
+ m_subp.call_args_list)
+
+ def test_install_with_custom_url(self, m_readurl, m_subp):
+ """Install AIO from custom URL"""
+ cc_puppet.install_puppet_aio('http://custom.url/path/to/script.sh')
+ m_readurl.assert_called_with(
+ url='http://custom.url/path/to/script.sh',
+ retries=5)
+
+ self.assertEqual(
+ [mock.call([mock.ANY, '--cleanup'], capture=False)],
+ m_subp.call_args_list)
+
+ def test_install_with_version(self, m_readurl, m_subp):
+ """Install AIO with specific version"""
+ cc_puppet.install_puppet_aio(cc_puppet.AIO_INSTALL_URL, '7.6.0')
+
+ self.assertEqual(
+ [mock.call([mock.ANY, '-v', '7.6.0', '--cleanup'], capture=False)],
+ m_subp.call_args_list)
+
+ def test_install_with_collection(self, m_readurl, m_subp):
+ """Install AIO with specific collection"""
+ cc_puppet.install_puppet_aio(
+ cc_puppet.AIO_INSTALL_URL, None, 'puppet6-nightly')
+
+ self.assertEqual(
+ [mock.call([mock.ANY, '-c', 'puppet6-nightly', '--cleanup'],
+ capture=False)],
+ m_subp.call_args_list)
+
+ def test_install_with_no_cleanup(self, m_readurl, m_subp):
+ """Install AIO with no cleanup"""
+ cc_puppet.install_puppet_aio(
+ cc_puppet.AIO_INSTALL_URL, None, None, False)
+
+ self.assertEqual(
+ [mock.call([mock.ANY], capture=False)],
+ m_subp.call_args_list)
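Taken together, the AIO tests above exercise a cloud-config 'puppet' block shaped roughly like this sketch (all values illustrative; each key maps onto an install_puppet_aio argument as asserted above):

from cloudinit.config import cc_puppet

cfg = {
    'puppet': {
        'install': True,
        'install_type': 'aio',    # route install through install_puppet_aio
        'version': '7.6.0',       # optional; forwarded to the script as -v
        'collection': 'puppet6',  # optional; forwarded as -c
        'aio_install_url': cc_puppet.AIO_INSTALL_URL,  # or a custom URL
        'cleanup': True,          # False drops the --cleanup flag
    }
}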
diff --git a/tests/unittests/test_handler/test_handler_resolv_conf.py b/tests/unittests/test_handler/test_handler_resolv_conf.py
new file mode 100644
index 00000000..96139001
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_resolv_conf.py
@@ -0,0 +1,105 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.config import cc_resolv_conf
+
+from cloudinit import cloud
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import util
+from copy import deepcopy
+
+from cloudinit.tests import helpers as t_help
+
+import logging
+import os
+import shutil
+import tempfile
+from unittest import mock
+
+LOG = logging.getLogger(__name__)
+
+
+class TestResolvConf(t_help.FilesystemMockingTestCase):
+ with_logs = True
+ cfg = {'manage_resolv_conf': True, 'resolv_conf': {}}
+
+ def setUp(self):
+ super(TestResolvConf, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ util.ensure_dir(os.path.join(self.tmp, 'data'))
+ self.addCleanup(shutil.rmtree, self.tmp)
+
+ def _fetch_distro(self, kind, conf=None):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({'cloud_dir': self.tmp})
+ conf = {} if conf is None else conf
+ return cls(kind, conf, paths)
+
+ def call_resolv_conf_handler(self, distro_name, conf, cc=None):
+ if not cc:
+ ds = None
+ distro = self._fetch_distro(distro_name, conf)
+ paths = helpers.Paths({'cloud_dir': self.tmp})
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ cc_resolv_conf.handle('cc_resolv_conf', conf, cc, LOG, [])
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_systemd_resolved(self, m_render_to_file):
+ self.call_resolv_conf_handler('photon', self.cfg)
+
+ assert [
+ mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_no_param(self, m_render_to_file):
+ tmp = deepcopy(self.cfg)
+ self.logs.truncate(0)
+ tmp.pop('resolv_conf')
+ self.call_resolv_conf_handler('photon', tmp)
+
+ self.assertIn('manage_resolv_conf True but no parameters provided',
+ self.logs.getvalue())
+ assert [
+ mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY)
+ ] not in m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_manage_resolv_conf_false(self, m_render_to_file):
+ tmp = deepcopy(self.cfg)
+ self.logs.truncate(0)
+ tmp['manage_resolv_conf'] = False
+ self.call_resolv_conf_handler('photon', tmp)
+ self.assertIn("'manage_resolv_conf' present but set to False",
+ self.logs.getvalue())
+ assert [
+ mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY)
+ ] not in m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_etc_resolv_conf(self, m_render_to_file):
+ self.call_resolv_conf_handler('rhel', self.cfg)
+
+ assert [
+ mock.call(mock.ANY, '/etc/resolv.conf', mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_invalid_resolve_conf_fn(self, m_render_to_file):
+ ds = None
+ distro = self._fetch_distro('rhel', self.cfg)
+ paths = helpers.Paths({'cloud_dir': self.tmp})
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ cc.distro.resolve_conf_fn = 'bla'
+
+ self.logs.truncate(0)
+ self.call_resolv_conf_handler('rhel', self.cfg, cc)
+
+ self.assertIn('No template found, not rendering resolve configs',
+ self.logs.getvalue())
+
+ assert [
+ mock.call(mock.ANY, '/etc/resolv.conf', mock.ANY)
+ ] not in m_render_to_file.call_args_list
+
+# vi: ts=4 expandtab
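All of the handler tests above start from the same two-key cloud-config. A slightly fuller sketch of the expected shape (the nested 'nameservers'/'searchdomains' keys and their values are assumptions for illustration, not shown in this patch):

cfg = {
    'manage_resolv_conf': True,
    'resolv_conf': {
        'nameservers': ['10.0.0.1'],
        'searchdomains': ['example.com'],
    },
}
# Rendered to /etc/systemd/resolved.conf on photon and to /etc/resolv.conf
# on rhel, per the render_to_file assertions above.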
diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py
index 32ca3b7e..1a524c7d 100644
--- a/tests/unittests/test_handler/test_handler_set_hostname.py
+++ b/tests/unittests/test_handler/test_handler_set_hostname.py
@@ -120,8 +120,8 @@ class TestHostname(t_help.FilesystemMockingTestCase):
contents = util.load_file(distro.hostname_conf_fn)
self.assertEqual('blah', contents.strip())
- @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False)
- def test_photon_hostname(self, m_uses_systemd):
+ @mock.patch('cloudinit.distros.photon.subp.subp')
+ def test_photon_hostname(self, m_subp):
cfg1 = {
'hostname': 'photon',
'prefer_fqdn_over_hostname': True,
@@ -134,17 +134,31 @@ class TestHostname(t_help.FilesystemMockingTestCase):
}
ds = None
+ m_subp.return_value = (None, None)
distro = self._fetch_distro('photon', cfg1)
paths = helpers.Paths({'cloud_dir': self.tmp})
cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
for c in [cfg1, cfg2]:
cc_set_hostname.handle('cc_set_hostname', c, cc, LOG, [])
- contents = util.load_file(distro.hostname_conf_fn, decode=True)
+ print("\n", m_subp.call_args_list)
if c['prefer_fqdn_over_hostname']:
- self.assertEqual(contents.strip(), c['fqdn'])
+ assert [
+ mock.call(['hostnamectl', 'set-hostname', c['fqdn']],
+ capture=True)
+ ] in m_subp.call_args_list
+ assert [
+ mock.call(['hostnamectl', 'set-hostname', c['hostname']],
+ capture=True)
+ ] not in m_subp.call_args_list
else:
- self.assertEqual(contents.strip(), c['hostname'])
+ assert [
+ mock.call(['hostnamectl', 'set-hostname', c['hostname']],
+ capture=True)
+ ] in m_subp.call_args_list
+ assert [
+ mock.call(['hostnamectl', 'set-hostname', c['fqdn']],
+ capture=True)
+ ] not in m_subp.call_args_list
def test_multiple_calls_skips_unchanged_hostname(self):
"""Only new hostname or fqdn values will generate a hostname call."""
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 43e209c1..fc77b11e 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -5308,6 +5308,7 @@ class TestNetRenderers(CiTestCase):
('opensuse-tumbleweed', '', ''),
('sles', '', ''),
('centos', '', ''),
+ ('eurolinux', '', ''),
('fedora', '', ''),
('redhat', '', ''),
]
diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py
index db825c35..38f2edf2 100644
--- a/tests/unittests/test_net_activators.py
+++ b/tests/unittests/test_net_activators.py
@@ -11,7 +11,8 @@ from cloudinit.net.activators import (
from cloudinit.net.activators import (
IfUpDownActivator,
NetplanActivator,
- NetworkManagerActivator
+ NetworkManagerActivator,
+ NetworkdActivator
)
from cloudinit.net.network_state import parse_net_config_data
from cloudinit.safeyaml import load
@@ -116,11 +117,17 @@ NETWORK_MANAGER_AVAILABLE_CALLS = [
(('nmcli',), {'target': None}),
]
+NETWORKD_AVAILABLE_CALLS = [
+ (('ip',), {'search': ['/usr/bin', '/bin'], 'target': None}),
+ (('systemctl',), {'search': ['/usr/bin', '/bin'], 'target': None}),
+]
+
@pytest.mark.parametrize('activator, available_calls', [
(IfUpDownActivator, IF_UP_DOWN_AVAILABLE_CALLS),
(NetplanActivator, NETPLAN_AVAILABLE_CALLS),
(NetworkManagerActivator, NETWORK_MANAGER_AVAILABLE_CALLS),
+ (NetworkdActivator, NETWORKD_AVAILABLE_CALLS),
])
class TestActivatorsAvailable:
def test_available(
@@ -140,11 +147,18 @@ NETWORK_MANAGER_BRING_UP_CALL_LIST = [
((['nmcli', 'connection', 'up', 'ifname', 'eth1'], ), {}),
]
+NETWORKD_BRING_UP_CALL_LIST = [
+ ((['ip', 'link', 'set', 'up', 'eth0'], ), {}),
+ ((['ip', 'link', 'set', 'up', 'eth1'], ), {}),
+ ((['systemctl', 'restart', 'systemd-networkd', 'systemd-resolved'], ), {}),
+]
+
@pytest.mark.parametrize('activator, expected_call_list', [
(IfUpDownActivator, IF_UP_DOWN_BRING_UP_CALL_LIST),
(NetplanActivator, NETPLAN_CALL_LIST),
(NetworkManagerActivator, NETWORK_MANAGER_BRING_UP_CALL_LIST),
+ (NetworkdActivator, NETWORKD_BRING_UP_CALL_LIST),
])
class TestActivatorsBringUp:
@patch('cloudinit.subp.subp', return_value=('', ''))
@@ -159,8 +173,11 @@ class TestActivatorsBringUp:
def test_bring_up_interfaces(
self, m_subp, activator, expected_call_list, available_mocks
):
+ index = 0
activator.bring_up_interfaces(['eth0', 'eth1'])
- assert expected_call_list == m_subp.call_args_list
+ for call in m_subp.call_args_list:
+ assert call == expected_call_list[index]
+ index += 1
@patch('cloudinit.subp.subp', return_value=('', ''))
def test_bring_up_all_interfaces_v1(
@@ -191,11 +208,17 @@ NETWORK_MANAGER_BRING_DOWN_CALL_LIST = [
((['nmcli', 'connection', 'down', 'eth1'], ), {}),
]
+NETWORKD_BRING_DOWN_CALL_LIST = [
+ ((['ip', 'link', 'set', 'down', 'eth0'], ), {}),
+ ((['ip', 'link', 'set', 'down', 'eth1'], ), {}),
+]
+
@pytest.mark.parametrize('activator, expected_call_list', [
(IfUpDownActivator, IF_UP_DOWN_BRING_DOWN_CALL_LIST),
(NetplanActivator, NETPLAN_CALL_LIST),
(NetworkManagerActivator, NETWORK_MANAGER_BRING_DOWN_CALL_LIST),
+ (NetworkdActivator, NETWORKD_BRING_DOWN_CALL_LIST),
])
class TestActivatorsBringDown:
@patch('cloudinit.subp.subp', return_value=('', ''))
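The NETWORKD_* call lists above pin down the new activator's behaviour. As a sketch, bringing up interfaces reduces to the following commands, exactly the argv lists asserted above:

from cloudinit import subp

# One `ip link set up` per NIC, then a single restart of systemd-networkd
# and systemd-resolved, matching NETWORKD_BRING_UP_CALL_LIST.
for nic in ('eth0', 'eth1'):
    subp.subp(['ip', 'link', 'set', 'up', nic])
subp.subp(['systemctl', 'restart', 'systemd-networkd', 'systemd-resolved'])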
diff --git a/tests/unittests/test_render_cloudcfg.py b/tests/unittests/test_render_cloudcfg.py
index 275879af..00d50e66 100644
--- a/tests/unittests/test_render_cloudcfg.py
+++ b/tests/unittests/test_render_cloudcfg.py
@@ -9,9 +9,9 @@ from cloudinit import subp
from cloudinit import util
# TODO(Look to align with tools.render-cloudcfg or cloudinit.distos.OSFAMILIES)
-DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd",
- "netbsd", "openbsd", "photon", "rhel", "suse", "ubuntu",
- "unknown"]
+DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "eurolinux", "fedora",
+ "freebsd", "netbsd", "openbsd", "photon", "rhel", "suse",
+ "ubuntu", "unknown"]
@pytest.mark.allow_subp_for(sys.executable)
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index bcb8044f..a66788bf 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -1,6 +1,9 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import os
+
from collections import namedtuple
+from functools import partial
from unittest.mock import patch
from cloudinit import ssh_util
@@ -8,13 +11,48 @@ from cloudinit.tests import helpers as test_helpers
from cloudinit import util
# https://stackoverflow.com/questions/11351032/
-FakePwEnt = namedtuple(
- 'FakePwEnt',
- ['pw_dir', 'pw_gecos', 'pw_name', 'pw_passwd', 'pw_shell', 'pwd_uid'])
+FakePwEnt = namedtuple('FakePwEnt', [
+ 'pw_name',
+ 'pw_passwd',
+ 'pw_uid',
+ 'pw_gid',
+ 'pw_gecos',
+ 'pw_dir',
+ 'pw_shell',
+])
FakePwEnt.__new__.__defaults__ = tuple(
"UNSET_%s" % n for n in FakePwEnt._fields)
+def mock_get_owner(updated_permissions, value):
+ try:
+ return updated_permissions[value][0]
+ except KeyError:
+ return util.get_owner(value)
+
+
+def mock_get_group(updated_permissions, value):
+ try:
+ return updated_permissions[value][1]
+ except KeyError:
+ return util.get_group(value)
+
+
+def mock_get_user_groups(username):
+ return username
+
+
+def mock_get_permissions(updated_permissions, value):
+ try:
+ return updated_permissions[value][2]
+ except KeyError:
+ return util.get_permissions(value)
+
+
+def mock_getpwnam(users, username):
+ return users[username]
+
+
# Do not use these public keys, most of them are fetched from
# the testdata for OpenSSH, and their private keys are available
# https://github.com/openssh/openssh-portable/tree/master/regress/unittests/sshkey/testdata
@@ -552,12 +590,30 @@ class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase):
ssh_util.render_authorizedkeysfile_paths(
"/opt/%u/keys", "/home/bobby", "bobby"))
+ def test_user_file(self):
+ self.assertEqual(
+ ["/opt/bobby"],
+ ssh_util.render_authorizedkeysfile_paths(
+ "/opt/%u", "/home/bobby", "bobby"))
+
+ def test_user_file2(self):
+ self.assertEqual(
+ ["/opt/bobby/bobby"],
+ ssh_util.render_authorizedkeysfile_paths(
+ "/opt/%u/%u", "/home/bobby", "bobby"))
+
def test_multiple(self):
self.assertEqual(
["/keys/path1", "/keys/path2"],
ssh_util.render_authorizedkeysfile_paths(
"/keys/path1 /keys/path2", "/home/bobby", "bobby"))
+ def test_multiple2(self):
+ self.assertEqual(
+ ["/keys/path1", "/keys/bobby"],
+ ssh_util.render_authorizedkeysfile_paths(
+ "/keys/path1 /keys/%u", "/home/bobby", "bobby"))
+
def test_relative(self):
self.assertEqual(
["/home/bobby/.secret/keys"],
@@ -581,269 +637,763 @@ class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase):
class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
- @patch("cloudinit.ssh_util.pwd.getpwnam")
- def test_multiple_authorizedkeys_file_order1(self, m_getpwnam):
- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby')
- m_getpwnam.return_value = fpw
- user_ssh_folder = "%s/.ssh" % fpw.pw_dir
-
- # /tmp/home2/bobby/.ssh/authorized_keys = rsa
- authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder)
- util.write_file(authorized_keys, VALID_CONTENT['rsa'])
-
- # /tmp/home2/bobby/.ssh/user_keys = dsa
- user_keys = self.tmp_path('user_keys', dir=user_ssh_folder)
- util.write_file(user_keys, VALID_CONTENT['dsa'])
-
- # /tmp/sshd_config
+ def create_fake_users(self, names, mock_permissions,
+ m_get_group, m_get_owner, m_get_permissions,
+ m_getpwnam, users):
+ homes = []
+
+ root = '/tmp/root'
+ fpw = FakePwEnt(pw_name="root", pw_dir=root)
+ users["root"] = fpw
+
+ for name in names:
+ home = '/tmp/home/' + name
+ fpw = FakePwEnt(pw_name=name, pw_dir=home)
+ users[name] = fpw
+ homes.append(home)
+
+ m_get_permissions.side_effect = partial(
+ mock_get_permissions, mock_permissions)
+ m_get_owner.side_effect = partial(mock_get_owner, mock_permissions)
+ m_get_group.side_effect = partial(mock_get_group, mock_permissions)
+ m_getpwnam.side_effect = partial(mock_getpwnam, users)
+ return homes
+
+ def create_user_authorized_file(self, home, filename, content_key, keys):
+ user_ssh_folder = "%s/.ssh" % home
+ # /tmp/home/<user>/.ssh/authorized_keys = content_key
+ authorized_keys = self.tmp_path(filename, dir=user_ssh_folder)
+ util.write_file(authorized_keys, VALID_CONTENT[content_key])
+ keys[authorized_keys] = content_key
+ return authorized_keys
+
+ def create_global_authorized_file(self, filename, content_key, keys):
+ authorized_keys = self.tmp_path(filename, dir='/tmp')
+ util.write_file(authorized_keys, VALID_CONTENT[content_key])
+ keys[authorized_keys] = content_key
+ return authorized_keys
+
+ def create_sshd_config(self, authorized_keys_files):
sshd_config = self.tmp_path('sshd_config', dir="/tmp")
util.write_file(
sshd_config,
- "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)
+ "AuthorizedKeysFile " + authorized_keys_files
)
+ return sshd_config
+ def execute_and_check(self, user, sshd_config, solution, keys,
+ delete_keys=True):
(auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config)
+ user, sshd_config)
content = ssh_util.update_authorized_keys(auth_key_entries, [])
- self.assertEqual(user_keys, auth_key_fn)
- self.assertTrue(VALID_CONTENT['rsa'] in content)
- self.assertTrue(VALID_CONTENT['dsa'] in content)
+ self.assertEqual(auth_key_fn, solution)
+ for path, key in keys.items():
+ if path == solution:
+ self.assertTrue(VALID_CONTENT[key] in content)
+ else:
+ self.assertFalse(VALID_CONTENT[key] in content)
+
+ if delete_keys and os.path.isdir("/tmp/home/"):
+ util.delete_dir_contents("/tmp/home/")
@patch("cloudinit.ssh_util.pwd.getpwnam")
- def test_multiple_authorizedkeys_file_order2(self, m_getpwnam):
- fpw = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie')
- m_getpwnam.return_value = fpw
- user_ssh_folder = "%s/.ssh" % fpw.pw_dir
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_two_local_files(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = 'bobby'
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby], mock_permissions, m_get_group, m_get_owner,
+ m_get_permissions, m_getpwnam, users
+ )
+ home = homes[0]
- # /tmp/home/suzie/.ssh/authorized_keys = rsa
- authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder)
- util.write_file(authorized_keys, VALID_CONTENT['rsa'])
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home, 'authorized_keys', 'rsa', keys
+ )
- # /tmp/home/suzie/.ssh/user_keys = dsa
- user_keys = self.tmp_path('user_keys', dir=user_ssh_folder)
- util.write_file(user_keys, VALID_CONTENT['dsa'])
+ # /tmp/home/bobby/.ssh/user_keys = dsa
+ user_keys = self.create_user_authorized_file(
+ home, 'user_keys', 'dsa', keys
+ )
# /tmp/sshd_config
- sshd_config = self.tmp_path('sshd_config', dir="/tmp")
- util.write_file(
- sshd_config,
- "AuthorizedKeysFile %s %s" % (user_keys, authorized_keys)
+ options = "%s %s" % (authorized_keys, user_keys)
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(user_bobby, sshd_config, authorized_keys, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_two_local_files_inverted(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = 'bobby'
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby], mock_permissions, m_get_group, m_get_owner,
+ m_get_permissions, m_getpwnam, users
)
+ home = homes[0]
- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config)
- content = ssh_util.update_authorized_keys(auth_key_entries, [])
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home, 'authorized_keys', 'rsa', keys
+ )
- self.assertEqual(authorized_keys, auth_key_fn)
- self.assertTrue(VALID_CONTENT['rsa'] in content)
- self.assertTrue(VALID_CONTENT['dsa'] in content)
+ # /tmp/home/bobby/.ssh/user_keys = dsa
+ user_keys = self.create_user_authorized_file(
+ home, 'user_keys', 'dsa', keys
+ )
- @patch("cloudinit.ssh_util.pwd.getpwnam")
- def test_multiple_authorizedkeys_file_local_global(self, m_getpwnam):
- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby')
- m_getpwnam.return_value = fpw
- user_ssh_folder = "%s/.ssh" % fpw.pw_dir
+ # /tmp/sshd_config
+ options = "%s %s" % (user_keys, authorized_keys)
+ sshd_config = self.create_sshd_config(options)
- # /tmp/home2/bobby/.ssh/authorized_keys = rsa
- authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder)
- util.write_file(authorized_keys, VALID_CONTENT['rsa'])
+ self.execute_and_check(user_bobby, sshd_config, user_keys, keys)
- # /tmp/home2/bobby/.ssh/user_keys = dsa
- user_keys = self.tmp_path('user_keys', dir=user_ssh_folder)
- util.write_file(user_keys, VALID_CONTENT['dsa'])
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_local_global_files(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = 'bobby'
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby], mock_permissions, m_get_group, m_get_owner,
+ m_get_permissions, m_getpwnam, users
+ )
+ home = homes[0]
- # /tmp/etc/ssh/authorized_keys = ecdsa
- authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys',
- dir="/tmp")
- util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa'])
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home, 'authorized_keys', 'rsa', keys
+ )
- # /tmp/sshd_config
- sshd_config = self.tmp_path('sshd_config', dir="/tmp")
- util.write_file(
- sshd_config,
- "AuthorizedKeysFile %s %s %s" % (authorized_keys_global,
- user_keys, authorized_keys)
+ # /tmp/home/bobby/.ssh/user_keys = dsa
+ user_keys = self.create_user_authorized_file(
+ home, 'user_keys', 'dsa', keys
)
- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config)
- content = ssh_util.update_authorized_keys(auth_key_entries, [])
+ authorized_keys_global = self.create_global_authorized_file(
+ 'etc/ssh/authorized_keys', 'ecdsa', keys
+ )
- self.assertEqual(authorized_keys, auth_key_fn)
- self.assertTrue(VALID_CONTENT['rsa'] in content)
- self.assertTrue(VALID_CONTENT['ecdsa'] in content)
- self.assertTrue(VALID_CONTENT['dsa'] in content)
+ options = "%s %s %s" % (authorized_keys_global, user_keys,
+ authorized_keys)
+ sshd_config = self.create_sshd_config(options)
- @patch("cloudinit.ssh_util.pwd.getpwnam")
- def test_multiple_authorizedkeys_file_local_global2(self, m_getpwnam):
- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby')
- m_getpwnam.return_value = fpw
- user_ssh_folder = "%s/.ssh" % fpw.pw_dir
+ self.execute_and_check(user_bobby, sshd_config, user_keys, keys)
- # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa
- authorized_keys = self.tmp_path('authorized_keys2',
- dir=user_ssh_folder)
- util.write_file(authorized_keys, VALID_CONTENT['rsa'])
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_local_global_files_inverted(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = 'bobby'
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600),
+ '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby], mock_permissions, m_get_group, m_get_owner,
+ m_get_permissions, m_getpwnam, users
+ )
+ home = homes[0]
- # /tmp/home2/bobby/.ssh/user_keys3 = dsa
- user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder)
- util.write_file(user_keys, VALID_CONTENT['dsa'])
+ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home, 'authorized_keys2', 'rsa', keys
+ )
- # /tmp/etc/ssh/authorized_keys = ecdsa
- authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys',
- dir="/tmp")
- util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa'])
+ # /tmp/home/bobby/.ssh/user_keys3 = dsa
+ user_keys = self.create_user_authorized_file(
+ home, 'user_keys3', 'dsa', keys
+ )
- # /tmp/sshd_config
- sshd_config = self.tmp_path('sshd_config', dir="/tmp")
- util.write_file(
- sshd_config,
- "AuthorizedKeysFile %s %s %s" % (authorized_keys_global,
- authorized_keys, user_keys)
+ authorized_keys_global = self.create_global_authorized_file(
+ 'etc/ssh/authorized_keys', 'ecdsa', keys
)
- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config)
- content = ssh_util.update_authorized_keys(auth_key_entries, [])
+ options = "%s %s %s" % (authorized_keys_global, authorized_keys,
+ user_keys)
+ sshd_config = self.create_sshd_config(options)
- self.assertEqual(user_keys, auth_key_fn)
- self.assertTrue(VALID_CONTENT['rsa'] in content)
- self.assertTrue(VALID_CONTENT['ecdsa'] in content)
- self.assertTrue(VALID_CONTENT['dsa'] in content)
+ self.execute_and_check(user_bobby, sshd_config, authorized_keys, keys)
@patch("cloudinit.ssh_util.pwd.getpwnam")
- def test_multiple_authorizedkeys_file_global(self, m_getpwnam):
- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby')
- m_getpwnam.return_value = fpw
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_global_file(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = 'bobby'
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby], mock_permissions, m_get_group, m_get_owner,
+ m_get_permissions, m_getpwnam, users
+ )
+ home = homes[0]
# /tmp/etc/ssh/authorized_keys = rsa
- authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys',
- dir="/tmp")
- util.write_file(authorized_keys_global, VALID_CONTENT['rsa'])
+ authorized_keys_global = self.create_global_authorized_file(
+ 'etc/ssh/authorized_keys', 'rsa', keys
+ )
- # /tmp/sshd_config
- sshd_config = self.tmp_path('sshd_config')
- util.write_file(
- sshd_config,
- "AuthorizedKeysFile %s" % (authorized_keys_global)
+ options = "%s" % authorized_keys_global
+ sshd_config = self.create_sshd_config(options)
+
+ default = "%s/.ssh/authorized_keys" % home
+ self.execute_and_check(user_bobby, sshd_config, default, keys)
+
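The new tests delegate the repeated extract/update/assert sequence (visible in the removed lines above) to an execute_and_check() helper defined earlier in the file. A minimal sketch of the contract that helper presumably implements, reconstructed from the removed assertions; the keys bookkeeping and the cleanup step are assumptions, not the actual helper:

    import os
    from cloudinit import ssh_util, util

    def execute_and_check(username, sshd_config, expected_file, keys,
                          delete_keys=True):
        # Resolve which AuthorizedKeysFile entry cloud-init picks for this
        # user, then render the merged key content, exactly as the removed
        # tests did inline.
        auth_key_fn, auth_key_entries = ssh_util.extract_authorized_keys(
            username, sshd_config)
        content = ssh_util.update_authorized_keys(auth_key_entries, [])

        assert auth_key_fn == expected_file
        # Assumed shape: keys maps each file created by the
        # create_*_authorized_file helpers to the key text written there;
        # the selected file's key must appear in the rendered content.
        for path, key_text in keys.items():
            if path == expected_file:
                assert key_text in content

        # delete_keys=False keeps the /tmp fixtures alive so a second user
        # can be checked against the same layout (assumed behaviour).
        if delete_keys and os.path.exists('/tmp/home'):
            util.delete_dir_contents('/tmp/home')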
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_local_file_standard(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600),
+ }
+
+ user_bobby = 'bobby'
+ user_suzie = 'suzie'
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
)
+ home_bobby = homes[0]
+ home_suzie = homes[1]
- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config)
- content = ssh_util.update_authorized_keys(auth_key_entries, [])
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, 'authorized_keys', 'rsa', keys
+ )
- self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn)
- self.assertTrue(VALID_CONTENT['rsa'] in content)
+ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys
+ )
+
+ options = ".ssh/authorized_keys"
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
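These two-user tests lean on standard sshd_config semantics: an AuthorizedKeysFile entry that is not an absolute path is resolved against the authenticating user's home directory (the effect of prefixing %h/), so a single option line selects a different file per user:

    AuthorizedKeysFile .ssh/authorized_keys
    # bobby -> /tmp/home/bobby/.ssh/authorized_keys
    # suzie -> /tmp/home/suzie/.ssh/authorized_keys

(Here the homes come from the mocked getpwnam entries rather than real accounts.)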
@patch("cloudinit.ssh_util.pwd.getpwnam")
- def test_multiple_authorizedkeys_file_multiuser(self, m_getpwnam):
- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby')
- m_getpwnam.return_value = fpw
- user_ssh_folder = "%s/.ssh" % fpw.pw_dir
- # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa
- authorized_keys = self.tmp_path('authorized_keys2',
- dir=user_ssh_folder)
- util.write_file(authorized_keys, VALID_CONTENT['rsa'])
- # /tmp/home2/bobby/.ssh/user_keys3 = dsa
- user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder)
- util.write_file(user_keys, VALID_CONTENT['dsa'])
-
- fpw2 = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie')
- user_ssh_folder = "%s/.ssh" % fpw2.pw_dir
- # /tmp/home/suzie/.ssh/authorized_keys2 = ssh-xmss@openssh.com
- authorized_keys2 = self.tmp_path('authorized_keys2',
- dir=user_ssh_folder)
- util.write_file(authorized_keys2,
- VALID_CONTENT['ssh-xmss@openssh.com'])
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_local_file_custom(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600),
+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh/authorized_keys2': ('suzie', 'suzie', 0o600),
+ }
+
+ user_bobby = 'bobby'
+ user_suzie = 'suzie'
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
- # /tmp/etc/ssh/authorized_keys = ecdsa
- authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2',
- dir="/tmp")
- util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa'])
+ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, 'authorized_keys2', 'rsa', keys
+ )
- # /tmp/sshd_config
- sshd_config = self.tmp_path('sshd_config', dir="/tmp")
- util.write_file(
- sshd_config,
- "AuthorizedKeysFile %s %%h/.ssh/authorized_keys2 %s" %
- (authorized_keys_global, user_keys)
+ # /tmp/home/suzie/.ssh/authorized_keys2 = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, 'authorized_keys2', 'ssh-xmss@openssh.com', keys
)
- # process first user
- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config)
- content = ssh_util.update_authorized_keys(auth_key_entries, [])
+ options = ".ssh/authorized_keys2"
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
- self.assertEqual(user_keys, auth_key_fn)
- self.assertTrue(VALID_CONTENT['rsa'] in content)
- self.assertTrue(VALID_CONTENT['ecdsa'] in content)
- self.assertTrue(VALID_CONTENT['dsa'] in content)
- self.assertFalse(VALID_CONTENT['ssh-xmss@openssh.com'] in content)
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_local_global_files(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600),
+ '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600),
+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh/authorized_keys2': ('suzie', 'suzie', 0o600),
+ '/tmp/home/suzie/.ssh/user_keys3': ('suzie', 'suzie', 0o600),
+ }
+
+ user_bobby = 'bobby'
+ user_suzie = 'suzie'
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
- m_getpwnam.return_value = fpw2
- # process second user
- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw2.pw_name, sshd_config)
- content = ssh_util.update_authorized_keys(auth_key_entries, [])
+ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa
+ self.create_user_authorized_file(
+ home_bobby, 'authorized_keys2', 'rsa', keys
+ )
+ # /tmp/home/bobby/.ssh/user_keys3 = dsa
+ user_keys = self.create_user_authorized_file(
+ home_bobby, 'user_keys3', 'dsa', keys
+ )
+
+ # /tmp/home/suzie/.ssh/authorized_keys2 = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, 'authorized_keys2', 'ssh-xmss@openssh.com', keys
+ )
+
+ # /tmp/etc/ssh/authorized_keys2 = ecdsa
+ authorized_keys_global = self.create_global_authorized_file(
+ 'etc/ssh/authorized_keys2', 'ecdsa', keys
+ )
+
+ options = "%s %s %%h/.ssh/authorized_keys2" % \
+ (authorized_keys_global, user_keys)
+ sshd_config = self.create_sshd_config(options)
- self.assertEqual(authorized_keys2, auth_key_fn)
- self.assertTrue(VALID_CONTENT['ssh-xmss@openssh.com'] in content)
- self.assertTrue(VALID_CONTENT['ecdsa'] in content)
- self.assertTrue(VALID_CONTENT['dsa'] in content)
- self.assertFalse(VALID_CONTENT['rsa'] in content)
+ self.execute_and_check(
+ user_bobby, sshd_config, user_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
+ @patch("cloudinit.util.get_user_groups")
@patch("cloudinit.ssh_util.pwd.getpwnam")
- def test_multiple_authorizedkeys_file_multiuser2(self, m_getpwnam):
- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home/bobby')
- m_getpwnam.return_value = fpw
- user_ssh_folder = "%s/.ssh" % fpw.pw_dir
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_local_global_files_badguy(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam,
+ m_get_user_groups
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600),
+ '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600),
+ '/tmp/home/badguy': ('root', 'root', 0o755),
+ '/tmp/home/badguy/home': ('root', 'root', 0o755),
+ '/tmp/home/badguy/home/bobby': ('root', 'root', 0o655),
+ }
+
+ user_bobby = 'bobby'
+ user_badguy = 'badguy'
+ home_bobby, *_ = self.create_fake_users(
+ [user_bobby, user_badguy], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
+ )
+ m_get_user_groups.side_effect = mock_get_user_groups
+
# /tmp/home/bobby/.ssh/authorized_keys2 = rsa
- authorized_keys = self.tmp_path('authorized_keys2',
- dir=user_ssh_folder)
- util.write_file(authorized_keys, VALID_CONTENT['rsa'])
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, 'authorized_keys2', 'rsa', keys
+ )
# /tmp/home/bobby/.ssh/user_keys3 = dsa
- user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder)
- util.write_file(user_keys, VALID_CONTENT['dsa'])
+ user_keys = self.create_user_authorized_file(
+ home_bobby, 'user_keys3', 'dsa', keys
+ )
- fpw2 = FakePwEnt(pw_name='badguy', pw_dir='/tmp/home/badguy')
- user_ssh_folder = "%s/.ssh" % fpw2.pw_dir
# /tmp/home/badguy/home/bobby = ""
authorized_keys2 = self.tmp_path('home/bobby', dir="/tmp/home/badguy")
+ util.write_file(authorized_keys2, '')
# /tmp/etc/ssh/authorized_keys2 = ecdsa
- authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2',
- dir="/tmp")
- util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa'])
+ authorized_keys_global = self.create_global_authorized_file(
+ 'etc/ssh/authorized_keys2', 'ecdsa', keys
+ )
# /tmp/sshd_config
- sshd_config = self.tmp_path('sshd_config', dir="/tmp")
- util.write_file(
- sshd_config,
- "AuthorizedKeysFile %s %%h/.ssh/authorized_keys2 %s %s" %
- (authorized_keys_global, user_keys, authorized_keys2)
+ options = "%s %%h/.ssh/authorized_keys2 %s %s" % \
+ (authorized_keys2, authorized_keys_global, user_keys)
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(
+ user_badguy, sshd_config, authorized_keys2, keys
)
- # process first user
- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config)
- content = ssh_util.update_authorized_keys(auth_key_entries, [])
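For the badguy case it helps to see the option line after Python's %% escaping and sshd's token expansion; the first entry is badguy's root-owned, empty decoy file, and the removed assertions above pin down that bobby's keys must not leak into it:

    AuthorizedKeysFile /tmp/home/badguy/home/bobby %h/.ssh/authorized_keys2 /tmp/etc/ssh/authorized_keys2 /tmp/home/bobby/.ssh/user_keys3
    # bobby  -> %h expands to /tmp/home/bobby, so his own authorized_keys2 is used
    # badguy -> resolves to the empty /tmp/home/badguy/home/bobby, with no rsa key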
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_unaccessible_file(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam,
+ m_get_user_groups
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+
+ '/tmp/etc': ('root', 'root', 0o755),
+ '/tmp/etc/ssh': ('root', 'root', 0o755),
+ '/tmp/etc/ssh/userkeys': ('root', 'root', 0o700),
+ '/tmp/etc/ssh/userkeys/bobby': ('bobby', 'bobby', 0o600),
+ '/tmp/etc/ssh/userkeys/badguy': ('badguy', 'badguy', 0o600),
+
+ '/tmp/home/badguy': ('badguy', 'badguy', 0o700),
+ '/tmp/home/badguy/.ssh': ('badguy', 'badguy', 0o700),
+ '/tmp/home/badguy/.ssh/authorized_keys':
+ ('badguy', 'badguy', 0o600),
+ }
+
+ user_bobby = 'bobby'
+ user_badguy = 'badguy'
+ homes = self.create_fake_users(
+ [user_bobby, user_badguy], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
+ )
+ m_get_user_groups.side_effect = mock_get_user_groups
+ home_bobby = homes[0]
+ home_badguy = homes[1]
- self.assertEqual(user_keys, auth_key_fn)
- self.assertTrue(VALID_CONTENT['rsa'] in content)
- self.assertTrue(VALID_CONTENT['ecdsa'] in content)
- self.assertTrue(VALID_CONTENT['dsa'] in content)
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, 'authorized_keys', 'rsa', keys
+ )
+ # /tmp/etc/ssh/userkeys/bobby = dsa
+ # the file exists, but bobby cannot traverse the 0o700 root-owned
+ # userkeys dir, so this entry must be skipped for him
+ self.create_global_authorized_file(
+ 'etc/ssh/userkeys/bobby', 'dsa', keys
+ )
- m_getpwnam.return_value = fpw2
- # process second user
- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw2.pw_name, sshd_config)
- content = ssh_util.update_authorized_keys(auth_key_entries, [])
+ # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_badguy, 'authorized_keys', 'ssh-xmss@openssh.com', keys
+ )
- # badguy should not take the key from the other user!
- self.assertEqual(authorized_keys2, auth_key_fn)
- self.assertTrue(VALID_CONTENT['ecdsa'] in content)
- self.assertTrue(VALID_CONTENT['dsa'] in content)
- self.assertFalse(VALID_CONTENT['rsa'] in content)
+ # /tmp/etc/ssh/userkeys/badguy = ecdsa
+ self.create_global_authorized_file(
+ 'etc/ssh/userkeys/badguy', 'ecdsa', keys
+ )
+
+ # /tmp/sshd_config
+ options = "/tmp/etc/ssh/userkeys/%u .ssh/authorized_keys"
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(
+ user_badguy, sshd_config, authorized_keys2, keys
+ )
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_accessible_file(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam,
+ m_get_user_groups
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+
+ '/tmp/etc': ('root', 'root', 0o755),
+ '/tmp/etc/ssh': ('root', 'root', 0o755),
+ '/tmp/etc/ssh/userkeys': ('root', 'root', 0o755),
+ '/tmp/etc/ssh/userkeys/bobby': ('bobby', 'bobby', 0o600),
+ '/tmp/etc/ssh/userkeys/badguy': ('badguy', 'badguy', 0o600),
+
+ '/tmp/home/badguy': ('badguy', 'badguy', 0o700),
+ '/tmp/home/badguy/.ssh': ('badguy', 'badguy', 0o700),
+ '/tmp/home/badguy/.ssh/authorized_keys':
+ ('badguy', 'badguy', 0o600),
+ }
+
+ user_bobby = 'bobby'
+ user_badguy = 'badguy'
+ homes = self.create_fake_users(
+ [user_bobby, user_badguy], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
+ )
+ m_get_user_groups.side_effect = mock_get_user_groups
+ home_bobby = homes[0]
+ home_badguy = homes[1]
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ self.create_user_authorized_file(
+ home_bobby, 'authorized_keys', 'rsa', keys
+ )
+ # /tmp/etc/ssh/userkeys/bobby = dsa
+ # userkeys is 0o755 here, so bobby can reach his per-user file and
+ # it is selected ahead of ~/.ssh/authorized_keys
+ authorized_keys = self.create_global_authorized_file(
+ 'etc/ssh/userkeys/bobby', 'dsa', keys
+ )
+
+ # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com
+ self.create_user_authorized_file(
+ home_badguy, 'authorized_keys', 'ssh-xmss@openssh.com', keys
+ )
+
+ # /tmp/etc/ssh/userkeys/badguy = ecdsa
+ authorized_keys2 = self.create_global_authorized_file(
+ 'etc/ssh/userkeys/badguy', 'ecdsa', keys
+ )
+
+ # /tmp/sshd_config
+ options = "/tmp/etc/ssh/userkeys/%u .ssh/authorized_keys"
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(
+ user_badguy, sshd_config, authorized_keys2, keys
+ )
+
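The userkeys pair of tests differs only in the mode of /tmp/etc/ssh/userkeys (0o700 vs 0o755). sshd expands %u to the login name, so the first option entry is a per-user file under a root-owned directory; whether it is selected comes down to whether that user can traverse the directory, which the get_owner/get_group/get_permissions/get_user_groups mocks simulate. A simplified sketch of the kind of reachability check being exercised (not the actual ssh_util code):

    import os
    from cloudinit import util

    def user_can_reach(username, user_groups, path):
        # Walk the ancestors of path and test the execute bit that applies
        # to this user (owner, group, or other), using the same metadata
        # the tests mock out.
        parts = os.path.normpath(path).split(os.sep)
        for i in range(2, len(parts)):  # ancestors only, not the file
            current = os.sep.join(parts[:i])
            owner = util.get_owner(current)
            group = util.get_group(current)
            mode = util.get_permissions(current)
            if owner == username:
                allowed = mode & 0o100
            elif group in user_groups:
                allowed = mode & 0o010
            else:
                allowed = mode & 0o001
            if not allowed:
                return False
        return True

With userkeys at 0o700 and owned by root, user_can_reach('bobby', ['bobby'], '/tmp/etc/ssh/userkeys/bobby') is False and cloud-init falls back to ~/.ssh/authorized_keys; at 0o755 the per-user file wins, which is exactly the difference between the two expected targets above.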
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_hardcoded_single_user_file(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam,
+ m_get_user_groups
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+
+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600),
+ }
+
+ user_bobby = 'bobby'
+ user_suzie = 'suzie'
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+ m_get_user_groups.side_effect = mock_get_user_groups
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, 'authorized_keys', 'rsa', keys
+ )
+
+ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com
+ self.create_user_authorized_file(
+ home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s" % (authorized_keys)
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ default = "%s/.ssh/authorized_keys" % home_suzie
+ self.execute_and_check(user_suzie, sshd_config, default, keys)
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_hardcoded_single_user_file_inverted(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam,
+ m_get_user_groups
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+
+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600),
+ }
+
+ user_bobby = 'bobby'
+ user_suzie = 'suzie'
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+ m_get_user_groups.side_effect = mock_get_user_groups
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ self.create_user_authorized_file(
+ home_bobby, 'authorized_keys', 'rsa', keys
+ )
+
+ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s" % (authorized_keys2)
+ sshd_config = self.create_sshd_config(options)
+
+ default = "%s/.ssh/authorized_keys" % home_bobby
+ self.execute_and_check(
+ user_bobby, sshd_config, default, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_hardcoded_user_files(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam,
+ m_get_user_groups
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ '/tmp/home/bobby': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700),
+ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600),
+
+ '/tmp/home/suzie': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700),
+ '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600),
+ }
+
+ user_bobby = 'bobby'
+ user_suzie = 'suzie'
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie], mock_permissions, m_get_group,
+ m_get_owner, m_get_permissions, m_getpwnam, users
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+ m_get_user_groups.side_effect = mock_get_user_groups
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, 'authorized_keys', 'rsa', keys
+ )
+
+ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys
+ )
+
+ # /tmp/etc/ssh/authorized_keys = ecdsa
+ authorized_keys_global = self.create_global_authorized_file(
+ 'etc/ssh/authorized_keys', 'ecdsa', keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s %s %s" % \
+ (authorized_keys_global, authorized_keys, authorized_keys2)
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
# vi: ts=4 expandtab
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index e2979ed4..cf06ca3d 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -1,5 +1,6 @@
ader1990
ajmyyra
+akutz
AlexBaranowski
Aman306
andrewbogott
@@ -12,6 +13,7 @@ BirknerAlex
bmhughes
candlerb
cawamata
+ciprianbadescu
dankenigsberg
ddymko
dermotbradley
@@ -20,8 +22,10 @@ eandersson
eb3095
emmanuelthome
esposem
+GabrielNagy
giggsoff
hamalq
+impl
irishgordo
izzyleung
johnsonshi
@@ -32,6 +36,7 @@ klausenbusk
landon912
lucasmoura
lungj
+mal
mamercad
manuelisimo
marlluslustosa
diff --git a/tools/ds-identify b/tools/ds-identify
index 73e27c71..f509f566 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -125,7 +125,7 @@ DI_DSNAME=""
# be searched if there is no setting found in config.
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \
CloudSigma CloudStack DigitalOcean Vultr AliYun Ec2 GCE OpenNebula OpenStack \
-OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud"
+OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud VMware"
DI_DSLIST=""
DI_MODE=""
DI_ON_FOUND=""
@@ -141,6 +141,7 @@ error() {
debug 0 "$@"
stderr "$@"
}
+
warn() {
set -- "WARN:" "$@"
debug 0 "$@"
@@ -344,7 +345,6 @@ geom_label_status_as() {
return $ret
}
-
read_fs_info_freebsd() {
local oifs="$IFS" line="" delim=","
local ret=0 labels="" dev="" label="" ftype="" isodevs=""
@@ -404,7 +404,6 @@ cached() {
[ -n "$1" ] && _RET="$1" && return || return 1
}
-
detect_virt() {
local virt="${UNAVAILABLE}" r="" out=""
if [ -d /run/systemd ]; then
@@ -450,7 +449,7 @@ detect_virt() {
read_virt() {
cached "$DI_VIRT" && return 0
detect_virt
- DI_VIRT=${_RET}
+ DI_VIRT="${_RET}"
}
is_container() {
@@ -1364,6 +1363,84 @@ dscheck_Vultr() {
return $DS_NOT_FOUND
}
+vmware_has_envvar_vmx_guestinfo() {
+ [ -n "${VMX_GUESTINFO:-}" ]
+}
+
+vmware_has_envvar_vmx_guestinfo_metadata() {
+ [ -n "${VMX_GUESTINFO_METADATA:-}" ]
+}
+
+vmware_has_envvar_vmx_guestinfo_userdata() {
+ [ -n "${VMX_GUESTINFO_USERDATA:-}" ]
+}
+
+vmware_has_envvar_vmx_guestinfo_vendordata() {
+ [ -n "${VMX_GUESTINFO_VENDORDATA:-}" ]
+}
+
+vmware_has_rpctool() {
+ command -v vmware-rpctool >/dev/null 2>&1
+}
+
+vmware_rpctool_guestinfo() {
+ vmware-rpctool "info-get guestinfo.${1}" 2>/dev/null | grep "[[:alnum:]]"
+}
+
+vmware_rpctool_guestinfo_metadata() {
+ vmware_rpctool_guestinfo "metadata"
+}
+
+vmware_rpctool_guestinfo_userdata() {
+ vmware_rpctool_guestinfo "userdata"
+}
+
+vmware_rpctool_guestinfo_vendordata() {
+ vmware_rpctool_guestinfo "vendordata"
+}
+
+dscheck_VMware() {
+ # Checks to see if there is valid data for the VMware datasource.
+ # The data transports are checked in the following order:
+ #
+ # * envvars
+ # * guestinfo
+ #
+ # Note: when adding support for new data transports here, keep the
+ # order in sync with the _get_data function in DataSourceVMware.py.
+
+ # Check to see if running in a container and the VMware
+ # datasource is configured via environment variables.
+ if vmware_has_envvar_vmx_guestinfo; then
+ if vmware_has_envvar_vmx_guestinfo_metadata || \
+ vmware_has_envvar_vmx_guestinfo_userdata || \
+ vmware_has_envvar_vmx_guestinfo_vendordata; then
+ return "${DS_FOUND}"
+ fi
+ fi
+
+ # Do not proceed unless the detected platform is VMware.
+ if [ ! "${DI_VIRT}" = "vmware" ]; then
+ return "${DS_NOT_FOUND}"
+ fi
+
+ # Do not proceed if the vmware-rpctool command is not present.
+ if ! vmware_has_rpctool; then
+ return "${DS_NOT_FOUND}"
+ fi
+
+ # Activate the VMware datasource only if any of the fields used
+ # by the datasource are present in the guestinfo table.
+ if { vmware_rpctool_guestinfo_metadata || \
+ vmware_rpctool_guestinfo_userdata || \
+ vmware_rpctool_guestinfo_vendordata; } >/dev/null 2>&1; then
+ return "${DS_FOUND}"
+ fi
+
+ return "${DS_NOT_FOUND}"
+}
+
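The probes dscheck_VMware composes can also be run by hand when debugging detection; the container path keys off nothing but environment variables (any non-empty value counts), while the VM path needs vmware-rpctool from open-vm-tools. A sketch, with illustrative values:

    # container case: ds-identify sees only the envvars
    export VMX_GUESTINFO=true
    export VMX_GUESTINFO_METADATA='{"instance-id": "id-001"}'

    # VM case: query the guestinfo table the same way the script does
    vmware-rpctool "info-get guestinfo.metadata" 2>/dev/null | grep "[[:alnum:]]"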
collect_info() {
read_uname_info
read_virt
diff --git a/tools/hook-hotplug b/tools/hook-hotplug
index 34e95929..ced268b3 100755
--- a/tools/hook-hotplug
+++ b/tools/hook-hotplug
@@ -8,12 +8,17 @@ is_finished() {
[ -e /run/cloud-init/result.json ]
}
-if is_finished; then
+hotplug_enabled() {
+ [ "$(cloud-init devel hotplug-hook -s "${SUBSYSTEM}" query)" == "enabled" ]
+}
+
+if is_finished && hotplug_enabled; then
# open cloud-init's hotplug-hook fifo rw
exec 3<>/run/cloud-init/hook-hotplug-cmd
env_params=(
- --devpath="${DEVPATH}"
--subsystem="${SUBSYSTEM}"
+ handle
+ --devpath="${DEVPATH}"
--udevaction="${ACTION}"
)
# write params to cloud-init's hotplug-hook fifo
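With the reordered parameters, the argument string written to the fifo now names the handle subcommand after the subsystem and before the device details; for an added NIC the payload would look roughly like this (device path illustrative):

    --subsystem=net handle --devpath=/sys/class/net/eth1 --udevaction=add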
diff --git a/tools/read-dependencies b/tools/read-dependencies
index e52720d4..810154e4 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -23,6 +23,7 @@ DEFAULT_REQUIREMENTS = 'requirements.txt'
# Map the appropriate package dir needed for each distro choice
DISTRO_PKG_TYPE_MAP = {
'centos': 'redhat',
+ 'eurolinux': 'redhat',
'rocky': 'redhat',
'redhat': 'redhat',
'debian': 'debian',
@@ -68,11 +69,13 @@ ZYPPER_INSTALL = [
DRY_DISTRO_INSTALL_PKG_CMD = {
'rocky': ['yum', 'install', '--assumeyes'],
'centos': ['yum', 'install', '--assumeyes'],
+ 'eurolinux': ['yum', 'install', '--assumeyes'],
'redhat': ['yum', 'install', '--assumeyes'],
}
DISTRO_INSTALL_PKG_CMD = {
'rocky': MAYBE_RELIABLE_YUM_INSTALL,
+ 'eurolinux': MAYBE_RELIABLE_YUM_INSTALL,
'centos': MAYBE_RELIABLE_YUM_INSTALL,
'redhat': MAYBE_RELIABLE_YUM_INSTALL,
'debian': ['apt', 'install', '-y'],
@@ -85,6 +88,7 @@ DISTRO_INSTALL_PKG_CMD = {
# List of base system packages required to enable ci automation
CI_SYSTEM_BASE_PKGS = {
'common': ['make', 'sudo', 'tar'],
+ 'eurolinux': ['python3-tox'],
'redhat': ['python3-tox'],
'centos': ['python3-tox'],
'ubuntu': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild'],
@@ -277,10 +281,10 @@ def pkg_install(pkg_list, distro, test_distro=False, dry_run=False):
cmd = DRY_DISTRO_INSTALL_PKG_CMD[distro]
install_cmd.extend(cmd)
- if distro in ['centos', 'redhat', 'rocky']:
+ if distro in ['centos', 'redhat', 'rocky', 'eurolinux']:
# CentOS and Redhat need epel-release to access oauthlib and jsonschema
subprocess.check_call(install_cmd + ['epel-release'])
- if distro in ['suse', 'opensuse', 'redhat', 'rocky', 'centos']:
+ if distro in ['suse', 'opensuse', 'redhat', 'rocky', 'centos', 'eurolinux']:
pkg_list.append('rpm-build')
subprocess.check_call(install_cmd + pkg_list)
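With 'eurolinux' mapped onto the redhat package set, bootstrapping build/test dependencies on EuroLinux presumably mirrors the CentOS flow; assuming the script's usual flags (--distro/--install/--test-distro), an invocation would look like:

    python3 tools/read-dependencies --distro eurolinux --install --test-distro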
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
index 7e667de4..30f82521 100755
--- a/tools/render-cloudcfg
+++ b/tools/render-cloudcfg
@@ -5,8 +5,8 @@ import os
import sys
VARIANTS = ["almalinux", "alpine", "amazon", "arch", "centos", "debian",
- "fedora", "freebsd", "netbsd", "openbsd", "photon", "rhel",
- "suse","rocky", "ubuntu", "unknown"]
+ "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", "photon",
+ "rhel", "suse","rocky", "ubuntu", "unknown", "virtuozzo"]
if "avoid-pep8-E402-import-not-top-of-file":
diff --git a/tox.ini b/tox.ini
index f21e1186..27c16ef3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -23,7 +23,7 @@ setenv =
basepython = python3
deps =
# requirements
- pylint==2.6.0
+ pylint==2.9.3
# test-requirements because unit tests are now present in cloudinit tree
-r{toxinidir}/test-requirements.txt
-r{toxinidir}/integration-requirements.txt