summaryrefslogtreecommitdiff
path: root/cloudinit/config
diff options
context:
space:
mode:
Diffstat (limited to 'cloudinit/config')
-rw-r--r--cloudinit/config/__init__.py47
-rw-r--r--cloudinit/config/cc_apt_configure.py5
-rw-r--r--[-rwxr-xr-x]cloudinit/config/cc_byobu.py0
-rw-r--r--cloudinit/config/cc_emit_upstart.py78
-rw-r--r--cloudinit/config/cc_fan.py60
-rw-r--r--cloudinit/config/cc_final_message.py49
-rw-r--r--cloudinit/config/cc_foo.py57
-rw-r--r--cloudinit/config/cc_growpart.py266
-rw-r--r--cloudinit/config/cc_grub_dpkg.py69
-rw-r--r--cloudinit/config/cc_install_hotplug.py51
-rw-r--r--cloudinit/config/cc_keyboard.py69
-rw-r--r--cloudinit/config/cc_keys_to_console.py80
-rw-r--r--cloudinit/config/cc_landscape.py110
-rw-r--r--cloudinit/config/cc_locale.py33
-rw-r--r--cloudinit/config/cc_lxd.py106
-rw-r--r--cloudinit/config/cc_mcollective.py100
-rw-r--r--cloudinit/config/cc_migrator.py42
-rw-r--r--cloudinit/config/cc_mounts.py110
-rw-r--r--cloudinit/config/cc_ntp.py135
-rw-r--r--cloudinit/config/cc_package_update_upgrade_install.py73
-rw-r--r--cloudinit/config/cc_phone_home.py100
-rw-r--r--cloudinit/config/cc_power_state_change.py105
-rw-r--r--cloudinit/config/cc_puppet.py183
-rw-r--r--cloudinit/config/cc_refresh_rmc_and_interface.py41
-rw-r--r--cloudinit/config/cc_reset_rmc.py38
-rw-r--r--cloudinit/config/cc_resizefs.py33
-rw-r--r--cloudinit/config/cc_resolv_conf.py92
-rw-r--r--cloudinit/config/cc_rh_subscription.py98
-rw-r--r--cloudinit/config/cc_rightscale_userdata.py52
-rw-r--r--cloudinit/config/cc_rsyslog.py240
-rw-r--r--cloudinit/config/cc_runcmd.py77
-rw-r--r--cloudinit/config/cc_salt_minion.py78
-rw-r--r--cloudinit/config/cc_scripts_per_boot.py33
-rw-r--r--cloudinit/config/cc_scripts_per_instance.py32
-rw-r--r--cloudinit/config/cc_scripts_per_once.py36
-rw-r--r--cloudinit/config/cc_scripts_user.py32
-rw-r--r--cloudinit/config/cc_scripts_vendor.py70
-rw-r--r--cloudinit/config/cc_seed_random.py103
-rw-r--r--cloudinit/config/cc_set_hostname.py77
-rw-r--r--[-rwxr-xr-x]cloudinit/config/cc_set_passwords.py191
-rw-r--r--cloudinit/config/cc_snap.py110
-rw-r--r--cloudinit/config/cc_spacewalk.py44
-rw-r--r--[-rwxr-xr-x]cloudinit/config/cc_ssh.py173
-rw-r--r--[-rwxr-xr-x]cloudinit/config/cc_ssh_authkey_fingerprints.py51
-rw-r--r--[-rwxr-xr-x]cloudinit/config/cc_ssh_import_id.py124
-rw-r--r--cloudinit/config/cc_timezone.py41
-rw-r--r--cloudinit/config/cc_ubuntu_advantage.py37
-rw-r--r--cloudinit/config/cc_ubuntu_drivers.py47
-rw-r--r--cloudinit/config/cc_update_etc_hosts.py84
-rw-r--r--cloudinit/config/cc_update_hostname.py86
-rw-r--r--cloudinit/config/cc_users_groups.py249
-rw-r--r--cloudinit/config/cc_write_files.py147
-rw-r--r--cloudinit/config/cc_write_files_deferred.py60
-rw-r--r--cloudinit/config/cc_yum_add_repo.py118
-rw-r--r--cloudinit/config/cc_zypper_add_repo.py79
-rw-r--r--cloudinit/config/cloud-init-schema.json560
-rw-r--r--cloudinit/config/modules.py302
-rw-r--r--cloudinit/config/schema.py292
-rw-r--r--cloudinit/config/schemas/__init__.py0
-rw-r--r--cloudinit/config/schemas/schema-cloud-config-v1.json2273
-rw-r--r--cloudinit/config/schemas/versions.schema.cloud-config.json18
61 files changed, 5052 insertions, 3094 deletions
diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py
index ed124180..e69de29b 100644
--- a/cloudinit/config/__init__.py
+++ b/cloudinit/config/__init__.py
@@ -1,47 +0,0 @@
-# Copyright (C) 2008-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Chuck Short <chuck.short@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import log as logging
-from cloudinit.settings import FREQUENCIES, PER_INSTANCE
-
-LOG = logging.getLogger(__name__)
-
-# This prefix is used to make it less
-# of a chance that when importing
-# we will not find something else with the same
-# name in the lookup path...
-MOD_PREFIX = "cc_"
-
-
-def form_module_name(name):
- canon_name = name.replace("-", "_")
- if canon_name.lower().endswith(".py"):
- canon_name = canon_name[0 : (len(canon_name) - 3)]
- canon_name = canon_name.strip()
- if not canon_name:
- return None
- if not canon_name.startswith(MOD_PREFIX):
- canon_name = "%s%s" % (MOD_PREFIX, canon_name)
- return canon_name
-
-
-def fixup_module(mod, def_freq=PER_INSTANCE):
- if not hasattr(mod, "frequency"):
- setattr(mod, "frequency", def_freq)
- else:
- freq = mod.frequency
- if freq and freq not in FREQUENCIES:
- LOG.warning("Module %s has an unknown frequency %s", mod, freq)
- if not hasattr(mod, "distros"):
- setattr(mod, "distros", [])
- if not hasattr(mod, "osfamilies"):
- setattr(mod, "osfamilies", [])
- return mod
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index c558311a..7ca50194 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -571,7 +571,10 @@ def add_apt_sources(
if aa_repo_match(source):
try:
- subp.subp(["add-apt-repository", source], target=target)
+ subp.subp(
+ ["add-apt-repository", "--no-update", source],
+ target=target,
+ )
except subp.ProcessExecutionError:
LOG.exception("add-apt-repository failed.")
raise
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index fbc20410..fbc20410 100755..100644
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
deleted file mode 100644
index a928082b..00000000
--- a/cloudinit/config/cc_emit_upstart.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright (C) 2009-2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""
-Emit Upstart
-------------
-**Summary:** emit upstart configuration
-
-Emit upstart configuration for cloud-init modules on upstart based systems. No
-user configuration should be required.
-
-**Internal name:** ``cc_emit_upstart``
-
-**Module frequency:** always
-
-**Supported distros:** ubuntu, debian
-"""
-
-import os
-
-from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
-
-distros = ["ubuntu", "debian"]
-LOG = logging.getLogger(__name__)
-
-
-def is_upstart_system():
- if not os.path.isfile("/sbin/initctl"):
- LOG.debug("no /sbin/initctl located")
- return False
-
- myenv = os.environ.copy()
- if "UPSTART_SESSION" in myenv:
- del myenv["UPSTART_SESSION"]
- check_cmd = ["initctl", "version"]
- try:
- (out, _err) = subp.subp(check_cmd, env=myenv)
- return "upstart" in out
- except subp.ProcessExecutionError as e:
- LOG.debug(
- "'%s' returned '%s', not using upstart",
- " ".join(check_cmd),
- e.exit_code,
- )
- return False
-
-
-def handle(name, _cfg, cloud, log, args):
- event_names = args
- if not event_names:
- # Default to the 'cloud-config'
- # event for backwards compat.
- event_names = ["cloud-config"]
-
- if not is_upstart_system():
- log.debug("not upstart system, '%s' disabled", name)
- return
-
- cfgpath = cloud.paths.get_ipath_cur("cloud_config")
- for n in event_names:
- cmd = ["initctl", "emit", str(n), "CLOUD_CFG=%s" % cfgpath]
- try:
- subp.subp(cmd)
- except Exception as e:
- # TODO(harlowja), use log exception from utils??
- log.warning("Emission of upstart event %s failed due to: %s", n, e)
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
index 50a81744..57c762a1 100644
--- a/cloudinit/config/cc_fan.py
+++ b/cloudinit/config/cc_fan.py
@@ -3,12 +3,16 @@
# Author: Scott Moser <scott.moser@canonical.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Fan: Configure ubuntu fan networking"""
-"""
-Fan
----
-**Summary:** configure ubuntu fan networking
+from textwrap import dedent
+
+from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
This module installs, configures and starts the ubuntu fan network system. For
more information about Ubuntu Fan, see:
``https://wiki.ubuntu.com/FanNetworking``.
@@ -19,31 +23,37 @@ If cloud-init sees a ``fan`` entry in cloud-config it will:
- install the package ``ubuntu-fan`` if it is not installed
- ensure the service is started (or restarted if was previously running)
-**Internal name:** ``cc_fan``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu
-
-**Config keys**::
-
- fan:
- config: |
- # fan 240
- 10.0.0.0/8 eth0/16 dhcp
- 10.0.0.0/8 eth1/16 dhcp off
- # fan 241
- 241.0.0.0/8 eth0/16 dhcp
- config_path: /etc/network/fan
+Additionally, the ``ubuntu-fan`` package will be automatically installed
+if not present.
"""
-from cloudinit import log as logging
-from cloudinit import subp, util
-from cloudinit.settings import PER_INSTANCE
+distros = ["ubuntu"]
+meta: MetaSchema = {
+ "id": "cc_fan",
+ "name": "Fan",
+ "title": "Configure ubuntu fan networking",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ fan:
+ config: |
+ # fan 240
+ 10.0.0.0/8 eth0/16 dhcp
+ 10.0.0.0/8 eth1/16 dhcp off
+ # fan 241
+ 241.0.0.0/8 eth0/16 dhcp
+ config_path: /etc/network/fan
+ """
+ )
+ ],
+}
-LOG = logging.getLogger(__name__)
+__doc__ = get_meta_doc(meta)
-frequency = PER_INSTANCE
+LOG = logging.getLogger(__name__)
BUILTIN_CFG = {
"config": None,
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index f443ccd8..89be520e 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -5,12 +5,16 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Final Message: Output final message when cloud-init has finished"""
-"""
-Final Message
--------------
-**Summary:** output final message when cloud-init has finished
+from textwrap import dedent
+
+from cloudinit import templater, util, version
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_ALWAYS
+MODULE_DESCRIPTION = """\
This module configures the final message that cloud-init writes. The message is
specified as a jinja template with the following variables set:
@@ -19,22 +23,31 @@ specified as a jinja template with the following variables set:
- ``datasource``: cloud-init data source
- ``uptime``: system uptime
-**Internal name:** ``cc_final_message``
-
-**Module frequency:** always
-
-**Supported distros:** all
-
-**Config keys**::
-
- final_message: <message>
-
+Upon exit, this module writes ``/var/lib/cloud/instance/boot-finished``.
"""
-
-from cloudinit import templater, util, version
-from cloudinit.settings import PER_ALWAYS
-
frequency = PER_ALWAYS
+meta: MetaSchema = {
+ "id": "cc_final_message",
+ "name": "Final Message",
+ "title": "Output final message when cloud-init has finished",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": frequency,
+ "examples": [
+ dedent(
+ """\
+ final_message: |
+ cloud-init has finished
+ version: $version
+ timestamp: $timestamp
+ datasource: $datasource
+ uptime: $uptime
+ """
+ )
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
# Jinja formated default message
FINAL_MESSAGE_DEF = (
diff --git a/cloudinit/config/cc_foo.py b/cloudinit/config/cc_foo.py
deleted file mode 100644
index 3c307153..00000000
--- a/cloudinit/config/cc_foo.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""
-Foo
----
-**Summary:** example module
-
-Example to show module structure. Does not do anything.
-
-**Internal name:** ``cc_foo``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-"""
-
-from cloudinit.settings import PER_INSTANCE
-
-# Modules are expected to have the following attributes.
-# 1. A required 'handle' method which takes the following params.
-# a) The name will not be this files name, but instead
-# the name specified in configuration (which is the name
-# which will be used to find this module).
-# b) A configuration object that is the result of the merging
-# of cloud configs configuration with legacy configuration
-# as well as any datasource provided configuration
-# c) A cloud object that can be used to access various
-# datasource and paths for the given distro and data provided
-# by the various datasource instance types.
-# d) A argument list that may or may not be empty to this module.
-# Typically those are from module configuration where the module
-# is defined with some extra configuration that will eventually
-# be translated from yaml into arguments to this module.
-# 2. A optional 'frequency' that defines how often this module should be run.
-# Typically one of PER_INSTANCE, PER_ALWAYS, PER_ONCE. If not
-# provided PER_INSTANCE will be assumed.
-# See settings.py for these constants.
-# 3. A optional 'distros' array/set/tuple that defines the known distros
-# this module will work with (if not all of them). This is used to write
-# a warning out if a module is being ran on a untested distribution for
-# informational purposes. If non existent all distros are assumed and
-# no warning occurs.
-
-frequency = PER_INSTANCE
-
-
-def handle(name, _cfg, _cloud, log, _args):
- log.debug("Hi from module %s", name)
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 43334caa..14a2c0b8 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -5,29 +5,34 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Growpart: Grow partitions"""
-"""
-Growpart
---------
-**Summary:** grow partitions
+import base64
+import copy
+import json
+import os
+import os.path
+import re
+import stat
+from contextlib import suppress
+from pathlib import Path
+from textwrap import dedent
+from typing import Tuple
+
+from cloudinit import log as logging
+from cloudinit import subp, temp_utils, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_ALWAYS
+MODULE_DESCRIPTION = """\
Growpart resizes partitions to fill the available disk space.
This is useful for cloud instances with a larger amount of disk space available
than the pristine image uses, as it allows the instance to automatically make
use of the extra space.
The devices on which to run growpart are specified as a list under the
-``devices`` key. Each entry in the devices list can be either the path to the
-device's mountpoint in the filesystem or a path to the block device in
-``/dev``.
-
-The utility to use for resizing can be selected using the ``mode`` config key.
-If the ``mode`` key is set to ``auto``, then any available utility (either
-``growpart`` or BSD ``gpart``) will be used. If neither utility is available,
-no error will be raised. If ``mode`` is set to ``growpart``, then the
-``growpart`` utility will be used. If this utility is not available on the
-system, this will result in an error. If ``mode`` is set to ``off`` or
-``false``, then ``cc_growpart`` will take no action.
+``devices`` key.
There is some functionality overlap between this module and the ``growroot``
functionality of ``cloud-initramfs-tools``. However, there are some situations
@@ -44,36 +49,41 @@ Growpart is enabled by default on the root partition. The default config for
growpart is::
growpart:
- mode: auto
- devices: ["/"]
- ignore_growroot_disabled: false
-
-**Internal name:** ``cc_growpart``
-
-**Module frequency:** always
-
-**Supported distros:** all
-
-**Config keys**::
-
- growpart:
- mode: <auto/growpart/off/false>
- devices:
- - "/"
- - "/dev/vdb1"
- ignore_growroot_disabled: <true/false>
+ mode: auto
+ devices: ["/"]
+ ignore_growroot_disabled: false
"""
-
-import os
-import os.path
-import re
-import stat
-
-from cloudinit import log as logging
-from cloudinit import subp, temp_utils, util
-from cloudinit.settings import PER_ALWAYS
-
frequency = PER_ALWAYS
+meta: MetaSchema = {
+ "id": "cc_growpart",
+ "name": "Growpart",
+ "title": "Grow partitions",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": frequency,
+ "examples": [
+ dedent(
+ """\
+ growpart:
+ mode: auto
+ devices: ["/"]
+ ignore_growroot_disabled: false
+ """
+ ),
+ dedent(
+ """\
+ growpart:
+ mode: growpart
+ devices:
+ - "/"
+ - "/dev/vdb1"
+ ignore_growroot_disabled: true
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
DEFAULT_CONFIG = {
"mode": "auto",
@@ -81,6 +91,8 @@ DEFAULT_CONFIG = {
"ignore_growroot_disabled": False,
}
+KEYDATA_PATH = Path("/cc_growpart_keydata")
+
class RESIZE(object):
SKIPPED = "SKIPPED"
@@ -289,10 +301,128 @@ def devent2dev(devent):
return dev
+def get_mapped_device(blockdev):
+ """Returns underlying block device for a mapped device.
+
+ If it is mapped, blockdev will usually take the form of
+ /dev/mapper/some_name
+
+ If blockdev is a symlink pointing to a /dev/dm-* device, return
+ the device pointed to. Otherwise, return None.
+ """
+ realpath = os.path.realpath(blockdev)
+ if realpath.startswith("/dev/dm-"):
+ LOG.debug("%s is a mapped device pointing to %s", blockdev, realpath)
+ return realpath
+ return None
+
+
+def is_encrypted(blockdev, partition) -> bool:
+ """
+ Check if a device is an encrypted device. blockdev should have
+ a /dev/dm-* path whereas partition is something like /dev/sda1.
+ """
+ if not subp.which("cryptsetup"):
+ LOG.debug("cryptsetup not found. Assuming no encrypted partitions")
+ return False
+ try:
+ subp.subp(["cryptsetup", "status", blockdev])
+ except subp.ProcessExecutionError as e:
+ if e.exit_code == 4:
+ LOG.debug("Determined that %s is not encrypted", blockdev)
+ else:
+ LOG.warning(
+ "Received unexpected exit code %s from "
+ "cryptsetup status. Assuming no encrypted partitions.",
+ e.exit_code,
+ )
+ return False
+ with suppress(subp.ProcessExecutionError):
+ subp.subp(["cryptsetup", "isLuks", partition])
+ LOG.debug("Determined that %s is encrypted", blockdev)
+ return True
+ return False
+
+
+def get_underlying_partition(blockdev):
+ command = ["dmsetup", "deps", "--options=devname", blockdev]
+ dep: str = subp.subp(command)[0] # type: ignore
+ # Returned result should look something like:
+ # 1 dependencies : (vdb1)
+ if not dep.startswith("1 depend"):
+ raise RuntimeError(
+ f"Expecting '1 dependencies' from 'dmsetup'. Received: {dep}"
+ )
+ try:
+ return f'/dev/{dep.split(": (")[1].split(")")[0]}'
+ except IndexError as e:
+ raise RuntimeError(
+ f"Ran `{command}`, but received unexpected stdout: `{dep}`"
+ ) from e
+
+
+def resize_encrypted(blockdev, partition) -> Tuple[str, str]:
+ """Use 'cryptsetup resize' to resize LUKS volume.
+
+ The loaded keyfile is json formatted with 'key' and 'slot' keys.
+ key is base64 encoded. Example:
+ {"key":"XFmCwX2FHIQp0LBWaLEMiHIyfxt1SGm16VvUAVledlY=","slot":5}
+ """
+ if not KEYDATA_PATH.exists():
+ return (RESIZE.SKIPPED, "No encryption keyfile found")
+ try:
+ with KEYDATA_PATH.open() as f:
+ keydata = json.load(f)
+ key = keydata["key"]
+ decoded_key = base64.b64decode(key)
+ slot = keydata["slot"]
+ except Exception as e:
+ raise RuntimeError(
+ "Could not load encryption key. This is expected if "
+ "the volume has been previously resized."
+ ) from e
+
+ try:
+ subp.subp(
+ ["cryptsetup", "--key-file", "-", "resize", blockdev],
+ data=decoded_key,
+ )
+ finally:
+ try:
+ subp.subp(
+ [
+ "cryptsetup",
+ "luksKillSlot",
+ "--batch-mode",
+ partition,
+ str(slot),
+ ]
+ )
+ except subp.ProcessExecutionError as e:
+ LOG.warning(
+ "Failed to kill luks slot after resizing encrypted volume: %s",
+ e,
+ )
+ try:
+ KEYDATA_PATH.unlink()
+ except Exception:
+ util.logexc(
+ LOG, "Failed to remove keyfile after resizing encrypted volume"
+ )
+
+ return (
+ RESIZE.CHANGED,
+ f"Successfully resized encrypted volume '{blockdev}'",
+ )
+
+
def resize_devices(resizer, devices):
# returns a tuple of tuples containing (entry-in-devices, action, message)
+ devices = copy.copy(devices)
info = []
- for devent in devices:
+
+ while devices:
+ devent = devices.pop(0)
try:
blockdev = devent2dev(devent)
except ValueError as e:
@@ -329,6 +459,49 @@ def resize_devices(resizer, devices):
)
continue
+ underlying_blockdev = get_mapped_device(blockdev)
+ if underlying_blockdev:
+ try:
+ # We need to resize the underlying partition first
+ partition = get_underlying_partition(blockdev)
+ if is_encrypted(underlying_blockdev, partition):
+ if partition not in [x[0] for x in info]:
+ # We shouldn't attempt to resize this mapped partition
+ # until the underlying partition is resized, so re-add
+ # our device to the beginning of the list we're
+ # iterating over, then add our underlying partition
+ # so it can get processed first
+ devices.insert(0, devent)
+ devices.insert(0, partition)
+ continue
+ status, message = resize_encrypted(blockdev, partition)
+ info.append(
+ (
+ devent,
+ status,
+ message,
+ )
+ )
+ else:
+ info.append(
+ (
+ devent,
+ RESIZE.SKIPPED,
+ f"Resizing mapped device ({blockdev}) skipped "
+ "as it is not encrypted.",
+ )
+ )
+ except Exception as e:
+ info.append(
+ (
+ devent,
+ RESIZE.FAILED,
+ f"Resizing encrypted device ({blockdev}) failed: {e}",
+ )
+ )
+ # At this point, we WON'T resize a non-encrypted mapped device
+ # though we should probably grow the ability to
+ continue
try:
(disk, ptnum) = device_part_info(blockdev)
except (TypeError, ValueError) as e:
@@ -388,6 +561,11 @@ def handle(_name, cfg, _cloud, log, _args):
mode = mycfg.get("mode", "auto")
if util.is_false(mode):
+ if mode != "off":
+ log.warning(
+ f"DEPRECATED: growpart mode '{mode}' is deprecated. "
+ "Use 'off' instead."
+ )
log.debug("growpart disabled: mode=%s" % mode)
return
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index ad7243d9..c23e40f5 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -6,12 +6,17 @@
# Author: Matthew Ruffell <matthew.ruffell@canonical.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Grub Dpkg: Configure grub debconf installation device"""
-"""
-Grub Dpkg
----------
-**Summary:** configure grub debconf installation device
+import os
+from textwrap import dedent
+
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+from cloudinit.subp import ProcessExecutionError
+MODULE_DESCRIPTION = """\
Configure which device is used as the target for grub installation. This module
should work correctly by default without any user configuration. It can be
enabled/disabled using the ``enabled`` config key in the ``grub_dpkg`` config
@@ -25,28 +30,28 @@ but we do fallback to the plain disk name if a by-id name is not present.
If this module is executed inside a container, then the debconf database is
seeded with empty values, and install_devices_empty is set to true.
-
-**Internal name:** ``cc_grub_dpkg``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu, debian
-
-**Config keys**::
-
- grub_dpkg:
- enabled: <true/false>
- grub-pc/install_devices: <devices>
- grub-pc/install_devices_empty: <devices>
- grub-dpkg: (alias for grub_dpkg)
"""
-
-import os
-
-from cloudinit import subp, util
-from cloudinit.subp import ProcessExecutionError
-
distros = ["ubuntu", "debian"]
+meta: MetaSchema = {
+ "id": "cc_grub_dpkg",
+ "name": "Grub Dpkg",
+ "title": "Configure grub debconf installation device",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ grub_dpkg:
+ enabled: true
+ grub-pc/install_devices: /dev/sda
+ grub-pc/install_devices_empty: false
+ """
+ )
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
def fetch_idevs(log):
@@ -121,14 +126,20 @@ def handle(name, cfg, _cloud, log, _args):
return
idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
- idevs_empty = util.get_cfg_option_str(
- mycfg, "grub-pc/install_devices_empty", None
- )
-
if idevs is None:
idevs = fetch_idevs(log)
+
+ idevs_empty = mycfg.get("grub-pc/install_devices_empty")
if idevs_empty is None:
- idevs_empty = "false" if idevs else "true"
+ idevs_empty = not idevs
+ elif not isinstance(idevs_empty, bool):
+ log.warning(
+ "DEPRECATED: grub_dpkg: grub-pc/install_devices_empty value of "
+ f"'{idevs_empty}' is not boolean. Use of non-boolean values "
+ "will be removed in a future version of cloud-init."
+ )
+ idevs_empty = util.translate_bool(idevs_empty)
+ idevs_empty = str(idevs_empty).lower()
# now idevs and idevs_empty are set to determined values
# or, those set by user
diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py
index 34c4557e..a3668232 100644
--- a/cloudinit/config/cc_install_hotplug.py
+++ b/cloudinit/config/cc_install_hotplug.py
@@ -4,22 +4,15 @@ import os
from textwrap import dedent
from cloudinit import stages, subp, util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.event import EventScope, EventType
from cloudinit.settings import PER_INSTANCE
-frequency = PER_INSTANCE
-distros = [ALL_DISTROS]
-
meta: MetaSchema = {
"id": "cc_install_hotplug",
"name": "Install Hotplug",
- "title": "Install hotplug if supported and enabled",
+ "title": "Install hotplug udev rules if supported and enabled",
"description": dedent(
"""\
This module will install the udev rules to enable hotplug if
@@ -36,7 +29,8 @@ meta: MetaSchema = {
Currently supported datasources: Openstack, EC2
"""
),
- "distros": distros,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
"examples": [
dedent(
"""\
@@ -55,43 +49,9 @@ meta: MetaSchema = {
"""
),
],
- "frequency": frequency,
-}
-
-schema = {
- "type": "object",
- "properties": {
- "updates": {
- "type": "object",
- "additionalProperties": False,
- "properties": {
- "network": {
- "type": "object",
- "required": ["when"],
- "additionalProperties": False,
- "properties": {
- "when": {
- "type": "array",
- "additionalProperties": False,
- "items": {
- "type": "string",
- "additionalProperties": False,
- "enum": [
- "boot-new-instance",
- "boot-legacy",
- "boot",
- "hotplug",
- ],
- },
- }
- },
- }
- },
- }
- },
}
-__doc__ = get_meta_doc(meta, schema)
+__doc__ = get_meta_doc(meta)
HOTPLUG_UDEV_PATH = "/etc/udev/rules.d/10-cloud-init-hook-hotplug.rules"
@@ -105,7 +65,6 @@ LABEL="cloudinit_end"
def handle(_name, cfg, cloud, log, _args):
- validate_cloudconfig_schema(cfg, schema)
network_hotplug_enabled = (
"updates" in cfg
and "network" in cfg["updates"]
diff --git a/cloudinit/config/cc_keyboard.py b/cloudinit/config/cc_keyboard.py
index 98ef326a..211cb015 100644
--- a/cloudinit/config/cc_keyboard.py
+++ b/cloudinit/config/cc_keyboard.py
@@ -10,31 +10,21 @@ from textwrap import dedent
from cloudinit import distros
from cloudinit import log as logging
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
-frequency = PER_INSTANCE
-
# FIXME: setting keyboard layout should be supported by all OSes.
# But currently only implemented for Linux distributions that use systemd.
-osfamilies = ["arch", "debian", "redhat", "suse"]
-distros = distros.Distro.expand_osfamily(osfamilies)
DEFAULT_KEYBOARD_MODEL = "pc105"
+distros = distros.Distro.expand_osfamily(["arch", "debian", "redhat", "suse"])
+
meta: MetaSchema = {
"id": "cc_keyboard",
"name": "Keyboard",
"title": "Set keyboard layout",
- "description": dedent(
- """\
- Handle keyboard configuration.
- """
- ),
+ "description": "Handle keyboard configuration.",
"distros": distros,
"examples": [
dedent(
@@ -55,57 +45,11 @@ meta: MetaSchema = {
"""
),
],
- "frequency": frequency,
+ "frequency": PER_INSTANCE,
}
-schema = {
- "type": "object",
- "properties": {
- "keyboard": {
- "type": "object",
- "properties": {
- "layout": {
- "type": "string",
- "description": dedent(
- """\
- Required. Keyboard layout. Corresponds to XKBLAYOUT.
- """
- ),
- },
- "model": {
- "type": "string",
- "default": DEFAULT_KEYBOARD_MODEL,
- "description": dedent(
- """\
- Optional. Keyboard model. Corresponds to XKBMODEL.
- """
- ),
- },
- "variant": {
- "type": "string",
- "description": dedent(
- """\
- Optional. Keyboard variant. Corresponds to XKBVARIANT.
- """
- ),
- },
- "options": {
- "type": "string",
- "description": dedent(
- """\
- Optional. Keyboard options. Corresponds to XKBOPTIONS.
- """
- ),
- },
- },
- "required": ["layout"],
- "additionalProperties": False,
- }
- },
-}
-
-__doc__ = get_meta_doc(meta, schema)
+__doc__ = get_meta_doc(meta)
LOG = logging.getLogger(__name__)
@@ -116,7 +60,6 @@ def handle(name, cfg, cloud, log, args):
"Skipping module named %s, no 'keyboard' section found", name
)
return
- validate_cloudconfig_schema(cfg, schema)
kb_cfg = cfg["keyboard"]
layout = kb_cfg["layout"]
model = kb_cfg.get("model", DEFAULT_KEYBOARD_MODEL)
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index ab35e136..dd8b92fe 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -6,46 +6,64 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Keys to Console
----------------
-**Summary:** control which SSH host keys may be written to console
-
-For security reasons it may be desirable not to write SSH host keys and their
-fingerprints to the console. To avoid either being written to the console the
-``emit_keys_to_console`` config key under the main ``ssh`` config key can be
-used. To avoid the fingerprint of types of SSH host keys being written to
-console the ``ssh_fp_console_blacklist`` config key can be used. By default
-all types of keys will have their fingerprints written to console. To avoid
-host keys of a key type being written to console the
-``ssh_key_console_blacklist`` config key can be used. By default ``ssh-dss``
-host keys are not written to console.
-
-**Internal name:** ``cc_keys_to_console``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- ssh:
- emit_keys_to_console: false
-
- ssh_fp_console_blacklist: <list of key types>
- ssh_key_console_blacklist: <list of key types>
-"""
+"""Keys to Console: Control which SSH host keys may be written to console"""
import os
+from textwrap import dedent
from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
-frequency = PER_INSTANCE
-
# This is a tool that cloud init provides
HELPER_TOOL_TPL = "%s/cloud-init/write-ssh-key-fingerprints"
+distros = ["all"]
+
+meta: MetaSchema = {
+ "id": "cc_keys_to_console",
+ "name": "Keys to Console",
+ "title": "Control which SSH host keys may be written to console",
+ "description": (
+ "For security reasons it may be desirable not to write SSH host keys"
+ " and their fingerprints to the console. To avoid either being written"
+ " to the console the ``emit_keys_to_console`` config key under the"
+ " main ``ssh`` config key can be used. To avoid the fingerprint of"
+ " types of SSH host keys being written to console the"
+ " ``ssh_fp_console_blacklist`` config key can be used. By default,"
+ " all types of keys will have their fingerprints written to console."
+ " To avoid host keys of a key type being written to console the"
+        " ``ssh_key_console_blacklist`` config key can be used. By default,"
+ " ``ssh-dss`` host keys are not written to console."
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ # Do not print any SSH keys to system console
+ ssh:
+ emit_keys_to_console: false
+ """
+ ),
+ dedent(
+ """\
+ # Do not print certain ssh key types to console
+ ssh_key_console_blacklist: [dsa, ssh-dss]
+ """
+ ),
+ dedent(
+ """\
+ # Do not print specific ssh key fingerprints to console
+ ssh_fp_console_blacklist:
+ - E25451E0221B5773DEBFF178ECDACB160995AA89
+ - FE76292D55E8B28EE6DB2B34B2D8A784F8C0AAB0
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
+__doc__ = get_meta_doc(meta)
+
def _get_helper_tool_path(distro):
try:
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 03ebf411..ede09bd9 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -6,17 +6,38 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Landscape
----------
-**Summary:** install and configure landscape client
+"""install and configure landscape client"""
+
+import os
+from io import BytesIO
+from textwrap import dedent
+
+from configobj import ConfigObj
+
+from cloudinit import subp, type_utils, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf"
+LS_DEFAULT_FILE = "/etc/default/landscape-client"
+
+# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
+LSC_BUILTIN_CFG = {
+ "client": {
+ "log_level": "info",
+ "url": "https://landscape.canonical.com/message-system",
+ "ping_url": "http://landscape.canonical.com/ping",
+ "data_path": "/var/lib/landscape/client",
+ }
+}
+
+MODULE_DESCRIPTION = """\
This module installs and configures ``landscape-client``. The landscape client
will only be installed if the key ``landscape`` is present in config. Landscape
client configuration is given under the ``client`` key under the main
``landscape`` config key. The config parameters are not interpreted by
cloud-init, but rather are converted into a ConfigObj formatted file and
-written out to ``/etc/landscape/client.conf``.
+written out to the `[client]` section in ``/etc/landscape/client.conf``.
The following default client config is provided, but can be overridden::
@@ -33,53 +54,47 @@ The following default client config is provided, but can be overridden::
.. note::
if ``tags`` is defined, its contents should be a string delimited with
``,`` rather than a list
-
-**Internal name:** ``cc_landscape``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu
-
-**Config keys**::
-
- landscape:
- client:
- url: "https://landscape.canonical.com/message-system"
- ping_url: "http://landscape.canonical.com/ping"
- data_path: "/var/lib/landscape/client"
- http_proxy: "http://my.proxy.com/foobar"
- https_proxy: "https://my.proxy.com/foobar"
- tags: "server,cloud"
- computer_title: "footitle"
- registration_key: "fookey"
- account_name: "fooaccount"
"""
-
-import os
-from io import BytesIO
-
-from configobj import ConfigObj
-
-from cloudinit import subp, type_utils, util
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf"
-LS_DEFAULT_FILE = "/etc/default/landscape-client"
-
distros = ["ubuntu"]
-# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
-LSC_BUILTIN_CFG = {
- "client": {
- "log_level": "info",
- "url": "https://landscape.canonical.com/message-system",
- "ping_url": "http://landscape.canonical.com/ping",
- "data_path": "/var/lib/landscape/client",
- }
+meta: MetaSchema = {
+ "id": "cc_landscape",
+ "name": "Landscape",
+ "title": "Install and configure landscape client",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ # To discover additional supported client keys, run
+ # man landscape-config.
+ landscape:
+ client:
+ url: "https://landscape.canonical.com/message-system"
+ ping_url: "http://landscape.canonical.com/ping"
+ data_path: "/var/lib/landscape/client"
+ http_proxy: "http://my.proxy.com/foobar"
+ https_proxy: "https://my.proxy.com/foobar"
+ tags: "server,cloud"
+ computer_title: "footitle"
+ registration_key: "fookey"
+ account_name: "fooaccount"
+ """
+ ),
+ dedent(
+ """\
+ # Any keys below `client` are optional and the default values will
+ # be used.
+ landscape:
+ client: {}
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
}
+__doc__ = get_meta_doc(meta)
+
def handle(_name, cfg, cloud, log, _args):
"""
@@ -102,6 +117,7 @@ def handle(_name, cfg, cloud, log, _args):
cloud.distro.install_packages(("landscape-client",))
+ # Later order config values override earlier values
merge_data = [
LSC_BUILTIN_CFG,
LSC_CLIENT_CFG_FILE,
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
index 29f6a9b6..6a31933e 100644
--- a/cloudinit/config/cc_locale.py
+++ b/cloudinit/config/cc_locale.py
@@ -11,15 +11,11 @@
from textwrap import dedent
from cloudinit import util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
-frequency = PER_INSTANCE
distros = ["all"]
+
meta: MetaSchema = {
"id": "cc_locale",
"name": "Locale",
@@ -45,29 +41,10 @@ meta: MetaSchema = {
"""
),
],
- "frequency": frequency,
-}
-
-schema = {
- "type": "object",
- "properties": {
- "locale": {
- "type": "string",
- "description": (
- "The locale to set as the system's locale (e.g. ar_PS)"
- ),
- },
- "locale_configfile": {
- "type": "string",
- "description": (
- "The file in which to write the locale configuration (defaults"
- " to the distro's default location)"
- ),
- },
- },
+ "frequency": PER_INSTANCE,
}
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, args):
@@ -82,8 +59,6 @@ def handle(name, cfg, cloud, log, args):
)
return
- validate_cloudconfig_schema(cfg, schema)
-
log.debug("Setting locale to %s", locale)
locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
cloud.distro.apply_locale(locale, locale_cfgfile)
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 13ddcbe9..847a7c3c 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -4,59 +4,75 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-LXD
----
-**Summary:** configure lxd with ``lxd init`` and optionally lxd-bridge
+"""LXD: configure lxd with ``lxd init`` and optionally lxd-bridge"""
+
+import os
+from textwrap import dedent
+
+from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+_DEFAULT_NETWORK_NAME = "lxdbr0"
+
+MODULE_DESCRIPTION = """\
This module configures lxd with user specified options using ``lxd init``.
If lxd is not present on the system but lxd configuration is provided, then
lxd will be installed. If the selected storage backend is zfs, then zfs will
be installed if missing. If network bridge configuration is provided, then
lxd-bridge will be configured accordingly.
-
-**Internal name:** ``cc_lxd``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu
-
-**Config keys**::
-
- lxd:
- init:
- network_address: <ip addr>
- network_port: <port>
- storage_backend: <zfs/dir>
- storage_create_device: <dev>
- storage_create_loop: <size>
- storage_pool: <name>
- trust_password: <password>
- bridge:
- mode: <new, existing or none>
- name: <name>
- ipv4_address: <ip addr>
- ipv4_netmask: <cidr>
- ipv4_dhcp_first: <ip addr>
- ipv4_dhcp_last: <ip addr>
- ipv4_dhcp_leases: <size>
- ipv4_nat: <bool>
- ipv6_address: <ip addr>
- ipv6_netmask: <cidr>
- ipv6_nat: <bool>
- domain: <domain>
"""
-import os
-
-from cloudinit import log as logging
-from cloudinit import subp, util
-
distros = ["ubuntu"]
-LOG = logging.getLogger(__name__)
-
-_DEFAULT_NETWORK_NAME = "lxdbr0"
+meta: MetaSchema = {
+ "id": "cc_lxd",
+ "name": "LXD",
+ "title": "Configure LXD with ``lxd init`` and optionally lxd-bridge",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ # Simplest working directory backed LXD configuration
+ lxd:
+ init:
+ storage_backend: dir
+ """
+ ),
+ dedent(
+ """\
+ lxd:
+ init:
+ network_address: 0.0.0.0
+ network_port: 8443
+ storage_backend: zfs
+ storage_pool: datapool
+ storage_create_loop: 10
+ bridge:
+ mode: new
+ name: lxdbr0
+ ipv4_address: 10.0.8.1
+ ipv4_netmask: 24
+ ipv4_dhcp_first: 10.0.8.2
+ ipv4_dhcp_last: 10.0.8.3
+ ipv4_dhcp_leases: 250
+ ipv4_nat: true
+ ipv6_address: fd98:9e0:3744::1
+ ipv6_netmask: 64
+ ipv6_nat: true
+ domain: lxd
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
+
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, args):
@@ -300,8 +316,8 @@ def maybe_cleanup_default(
"""Newer versions of lxc (3.0.1+) create a lxdbr0 network when
'lxd init --auto' is run. Older versions did not.
- By removing ay that lxd-init created, we simply leave the add/attach
- code in-tact.
+ By removing any that lxd-init created, we simply leave the add/attach
+ code intact.
https://github.com/lxc/lxd/issues/4649"""
if net_name != _DEFAULT_NETWORK_NAME or not did_init:
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index 1b0158ec..33f7556d 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -7,11 +7,28 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Mcollective
------------
-**Summary:** install, configure and start mcollective
+"""Mcollective: Install, configure and start mcollective"""
+
+import errno
+import io
+from textwrap import dedent
+
+# Used since this can maintain comments
+# and doesn't need a top level section
+from configobj import ConfigObj
+from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+
+PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
+PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
+SERVER_CFG = "/etc/mcollective/server.cfg"
+
+LOG = logging.getLogger(__name__)
+
+MODULE_DESCRIPTION = """\
This module installs, configures and starts mcollective. If the ``mcollective``
key is present in config, then mcollective will be installed and started.
@@ -26,43 +43,48 @@ private certificates for mcollective. Their values will be written to
.. note::
The ec2 metadata service is readable by non-root users.
If security is a concern, use include-once and ssl urls.
-
-**Internal name:** ``cc_mcollective``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- mcollective:
- conf:
- <key>: <value>
- public-cert: |
- -------BEGIN CERTIFICATE--------
- <cert data>
- -------END CERTIFICATE--------
- private-cert: |
- -------BEGIN CERTIFICATE--------
- <cert data>
- -------END CERTIFICATE--------
"""
-import errno
-import io
-
-# Used since this can maintain comments
-# and doesn't need a top level section
-from configobj import ConfigObj
-
-from cloudinit import log as logging
-from cloudinit import subp, util
-
-PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
-PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
-SERVER_CFG = "/etc/mcollective/server.cfg"
-
-LOG = logging.getLogger(__name__)
+distros = ["all"]
+
+meta: MetaSchema = {
+ "id": "cc_mcollective",
+ "name": "Mcollective",
+ "title": "Install, configure and start mcollective",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ # Provide server private and public key and provide the following
+ # config settings in /etc/mcollective/server.cfg:
+ # loglevel: debug
+ # plugin.stomp.host: dbhost
+
+ # WARNING WARNING WARNING
+ # The ec2 metadata service is a network service, and thus is
+ # readable by non-root users on the system
+ # (ie: 'ec2metadata --user-data')
+ # If you want security for this, please use include-once + SSL urls
+ mcollective:
+ conf:
+ loglevel: debug
+ plugin.stomp.host: dbhost
+ public-cert: |
+ -------BEGIN CERTIFICATE--------
+ <cert data>
+ -------END CERTIFICATE--------
+ private-cert: |
+ -------BEGIN CERTIFICATE--------
+ <cert data>
+ -------END CERTIFICATE--------
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
+
+__doc__ = get_meta_doc(meta)
def configure(
diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py
index 4fafb4af..6aed54b3 100644
--- a/cloudinit/config/cc_migrator.py
+++ b/cloudinit/config/cc_migrator.py
@@ -4,36 +4,38 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Migrator
---------
-**Summary:** migrate old versions of cloud-init data to new
+"""Migrator: Migrate old versions of cloud-init data to new"""
+
+import os
+import shutil
+
+from cloudinit import helpers, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_ALWAYS
+MODULE_DESCRIPTION = """\
This module handles moving old versions of cloud-init data to newer ones.
Currently, it only handles renaming cloud-init's per-frequency semaphore files
to canonicalized name and renaming legacy semaphore names to newer ones. This
module is enabled by default, but can be disabled by specifying ``migrate:
false`` in config.
-
-**Internal name:** ``cc_migrator``
-
-**Module frequency:** always
-
-**Supported distros:** all
-
-**Config keys**::
-
- migrate: <true/false>
"""
-import os
-import shutil
-
-from cloudinit import helpers, util
-from cloudinit.settings import PER_ALWAYS
-
+distros = ["all"]
frequency = PER_ALWAYS
+meta: MetaSchema = {
+ "id": "cc_migrator",
+ "name": "Migrator",
+ "title": "Migrate old versions of cloud-init data to new",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "examples": ["# Do not migrate cloud-init semaphores\nmigrate: false\n"],
+ "frequency": frequency,
+}
+
+__doc__ = get_meta_doc(meta)
+
def _migrate_canon_sems(cloud):
paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem"))
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 83eb5b1b..1d05c9b9 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -6,11 +6,19 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Mounts
-------
-**Summary:** configure mount points and swap files
+"""Mounts: Configure mount points and swap files"""
+
+import logging
+import os
+import re
+from string import whitespace
+from textwrap import dedent
+
+from cloudinit import subp, type_utils, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
This module can add or remove mountpoints from ``/etc/fstab`` as well as
configure swap. The ``mounts`` config key takes a list of fstab entries to add.
Each entry is specified as a list of ``[ fs_spec, fs_file, fs_vfstype,
@@ -19,55 +27,79 @@ consult the manual for ``/etc/fstab``. When specifying the ``fs_spec``, if the
device name starts with one of ``xvd``, ``sd``, ``hd``, or ``vd``, the leading
``/dev`` may be omitted.
-In order to remove a previously listed mount, an entry can be added to the
-mounts list containing ``fs_spec`` for the device to be removed but no
-mountpoint (i.e. ``[ sda1 ]`` or ``[ sda1, null ]``).
+Any mounts that do not refer to either an attached block device or network
+resource will be skipped with a log like "Ignoring nonexistent mount ...".
+
+Cloud-init will attempt to add the following mount directives if available and
+unconfigured in `/etc/fstab`::
+
+ mounts:
+ - ["ephemeral0", "/mnt", "auto",\
+"defaults,nofail,x-systemd.requires=cloud-init.service", "0", "2"]
+ - ["swap", "none", "swap", "sw", "0", "0"]
+
+In order to remove a previously listed mount, an entry can be added to
+the `mounts` list containing ``fs_spec`` for the device to be removed but no
+mountpoint (i.e. ``[ swap ]`` or ``[ swap, null ]``).
The ``mount_default_fields`` config key allows default options to be specified
for the values in a ``mounts`` entry that are not specified, aside from the
``fs_spec`` and the ``fs_file``. If specified, this must be a list containing 6
values. It defaults to::
- mount_default_fields: [none, none, "auto", "defaults,nobootwait", "0", "2"]
-
-On a systemd booted system that default is the mostly equivalent::
-
- mount_default_fields: [none, none, "auto",
- "defaults,nofail,x-systemd.requires=cloud-init.service", "0", "2"]
+ mount_default_fields: [none, none, "auto",\
+"defaults,nofail,x-systemd.requires=cloud-init.service", "0", "2"]
-Note that `nobootwait` is an upstart specific boot option that somewhat
-equates to the more standard `nofail`.
+Non-systemd init systems will vary in ``mount_default_fields``.
Swap files can be configured by setting the path to the swap file to create
with ``filename``, the size of the swap file with ``size`` maximum size of
the swap file if using an ``size: auto`` with ``maxsize``. By default no
swap file is created.
+"""
-**Internal name:** ``cc_mounts``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
+example = dedent(
+ """\
+ # Mount ephemeral0 with "noexec" flag, /dev/sdc with mount_default_fields,
+ # and /dev/xvdh with custom fs_passno "0" to avoid fsck on the mount.
+ # Also provide an automatically sized swap with a max size of 10485760
+ # bytes.
mounts:
- [ /dev/ephemeral0, /mnt, auto, "defaults,noexec" ]
- [ sdc, /opt/data ]
- - [ xvdh, /opt/data, "auto", "defaults,nofail", "0", "0" ]
- mount_default_fields: [None, None, "auto", "defaults,nofail", "0", "2"]
+ - [ xvdh, /opt/data, auto, "defaults,nofail", "0", "0" ]
+ mount_default_fields: [None, None, auto, "defaults,nofail", "0", "2"]
swap:
- filename: <file>
- size: <"auto"/size in bytes>
- maxsize: <size in bytes>
-"""
-
-import logging
-import os
-import re
-from string import whitespace
-
-from cloudinit import subp, type_utils, util
+ filename: /my/swapfile
+ size: auto
+ maxsize: 10485760
+ """
+)
+
+distros = ["all"]
+
+meta: MetaSchema = {
+ "id": "cc_mounts",
+ "name": "Mounts",
+ "title": "Configure mount points and swap files",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "examples": [
+ example,
+ dedent(
+ """\
+ # Create a 2 GB swap file at /swapfile using human-readable values
+ swap:
+ filename: /swapfile
+ size: 2G
+ maxsize: 2G
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
+
+__doc__ = get_meta_doc(meta)
# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
@@ -178,7 +210,7 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
if memsize is None:
memsize = util.read_meminfo()["total"]
- GB = 2 ** 30
+ GB = 2**30
sugg_max = 8 * GB
info = {"avail": "na", "max_in": maxsize, "mem": memsize}
@@ -230,7 +262,7 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
info["size"] = size
- MB = 2 ** 20
+ MB = 2**20
pinfo = {}
for k, v in info.items():
if isinstance(v, int):
@@ -324,7 +356,7 @@ def setup_swapfile(fname, size=None, maxsize=None):
fsys=swap_dir, maxsize=maxsize, memsize=memsize
)
- mibsize = str(int(size / (2 ** 20)))
+ mibsize = str(int(size / (2**20)))
if not size:
LOG.debug("Not creating swap: suggested size was 0")
return
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 25bba764..3bc1d303 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -12,11 +12,7 @@ from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, temp_utils, templater, type_utils, util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -210,137 +206,14 @@ meta: MetaSchema = {
],
"frequency": PER_INSTANCE,
}
+__doc__ = get_meta_doc(meta)
+
-schema = {
- "type": "object",
- "properties": {
- "ntp": {
- "type": ["object", "null"],
- "properties": {
- "pools": {
- "type": "array",
- "items": {"type": "string", "format": "hostname"},
- "uniqueItems": True,
- "description": dedent(
- """\
- List of ntp pools. If both pools and servers are
- empty, 4 default pool servers will be provided of
- the format ``{0-3}.{distro}.pool.ntp.org``. NOTE:
- for Alpine Linux when using the Busybox NTP client
- this setting will be ignored due to the limited
- functionality of Busybox's ntpd."""
- ),
- },
- "servers": {
- "type": "array",
- "items": {"type": "string", "format": "hostname"},
- "uniqueItems": True,
- "description": dedent(
- """\
- List of ntp servers. If both pools and servers are
- empty, 4 default pool servers will be provided with
- the format ``{0-3}.{distro}.pool.ntp.org``."""
- ),
- },
- "ntp_client": {
- "type": "string",
- "default": "auto",
- "description": dedent(
- """\
- Name of an NTP client to use to configure system NTP.
- When unprovided or 'auto' the default client preferred
- by the distribution will be used. The following
- built-in client names can be used to override existing
- configuration defaults: chrony, ntp, ntpdate,
- systemd-timesyncd."""
- ),
- },
- "enabled": {
- "type": "boolean",
- "default": True,
- "description": dedent(
- """\
- Attempt to enable ntp clients if set to True. If set
- to False, ntp client will not be configured or
- installed"""
- ),
- },
- "config": {
- "description": dedent(
- """\
- Configuration settings or overrides for the
- ``ntp_client`` specified."""
- ),
- "type": ["object"],
- "properties": {
- "confpath": {
- "type": "string",
- "description": dedent(
- """\
- The path to where the ``ntp_client``
- configuration is written."""
- ),
- },
- "check_exe": {
- "type": "string",
- "description": dedent(
- """\
- The executable name for the ``ntp_client``.
- For example, ntp service ``check_exe`` is
- 'ntpd' because it runs the ntpd binary."""
- ),
- },
- "packages": {
- "type": "array",
- "items": {
- "type": "string",
- },
- "uniqueItems": True,
- "description": dedent(
- """\
- List of packages needed to be installed for the
- selected ``ntp_client``."""
- ),
- },
- "service_name": {
- "type": "string",
- "description": dedent(
- """\
- The systemd or sysvinit service name used to
- start and stop the ``ntp_client``
- service."""
- ),
- },
- "template": {
- "type": "string",
- "description": dedent(
- """\
- Inline template allowing users to define their
- own ``ntp_client`` configuration template.
- The value must start with '## template:jinja'
- to enable use of templating support.
- """
- ),
- },
- },
- # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override
- # of builtin client values.
- "minProperties": 1, # If we have config, define something
- "additionalProperties": False,
- },
- },
- "additionalProperties": False,
- }
- },
-}
REQUIRED_NTP_CONFIG_KEYS = frozenset(
["check_exe", "confpath", "packages", "service_name"]
)
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
-
-
def distro_ntp_client_configs(distro):
"""Construct a distro-specific ntp client config dictionary by merging
distro specific changes into base config.
@@ -604,8 +477,6 @@ def handle(name, cfg, cloud, log, _args):
" is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))
)
- validate_cloudconfig_schema(cfg, schema)
-
# Allow users to explicitly enable/disable
enabled = ntp_cfg.get("enabled", True)
if util.is_false(enabled):
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
index 14cdfab8..5198305e 100644
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -4,50 +4,53 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Package Update Upgrade Install
-------------------------------
-**Summary:** update, upgrade, and install packages
-
-This module allows packages to be updated, upgraded or installed during boot.
-If any packages are to be installed or an upgrade is to be performed then the
-package cache will be updated first. If a package installation or upgrade
-requires a reboot, then a reboot can be performed if
-``package_reboot_if_required`` is specified. A list of packages to install can
-be provided. Each entry in the list can be either a package name or a list with
-two entries, the first being the package name and the second being the specific
-package version to install.
-
-**Internal name:** ``cc_package_update_upgrade_install``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- packages:
- - pwgen
- - pastebinit
- - [libpython2.7, 2.7.3-0ubuntu3.1]
- package_update: <true/false>
- package_upgrade: <true/false>
- package_reboot_if_required: <true/false>
-
- apt_update: (alias for package_update)
- apt_upgrade: (alias for package_upgrade)
- apt_reboot_if_required: (alias for package_reboot_if_required)
-"""
+"""Package Update Upgrade Install: update, upgrade, and install packages"""
import os
import time
+from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
REBOOT_FILE = "/var/run/reboot-required"
REBOOT_CMD = ["/sbin/reboot"]
+MODULE_DESCRIPTION = """\
+This module allows packages to be updated, upgraded or installed during boot.
+If any packages are to be installed or an upgrade is to be performed then the
+package cache will be updated first. If a package installation or upgrade
+requires a reboot, then a reboot can be performed if
+``package_reboot_if_required`` is specified.
+"""
+
+meta: MetaSchema = {
+ "id": "cc_package_update_upgrade_install",
+ "name": "Package Update Upgrade Install",
+ "title": "Update, upgrade, and install packages",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ packages:
+ - pwgen
+ - pastebinit
+ - [libpython3.8, 3.8.10-0ubuntu1~20.04.2]
+ package_update: true
+ package_upgrade: true
+ package_reboot_if_required: true
+ """
+ )
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
+
def _multi_cfg_bool_get(cfg, *keys):
for k in keys:
@@ -60,7 +63,7 @@ def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
subp.subp(REBOOT_CMD)
start = time.time()
wait_time = initial_sleep
- for _i in range(0, wait_attempts):
+ for _i in range(wait_attempts):
time.sleep(wait_time)
wait_time *= backoff
elapsed = time.time() - start
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index a0e1da78..681c3729 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -6,11 +6,28 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Phone Home
-----------
-**Summary:** post data to url
+"""Phone Home: Post data to url"""
+
+from textwrap import dedent
+
+from cloudinit import templater, url_helper, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+frequency = PER_INSTANCE
+
+POST_LIST_ALL = [
+ "pub_key_dsa",
+ "pub_key_rsa",
+ "pub_key_ecdsa",
+ "pub_key_ed25519",
+ "instance_id",
+ "hostname",
+ "fqdn",
+]
+
+MODULE_DESCRIPTION = """\
This module can be used to post data to a remote host after boot is complete.
If the post url contains the string ``$INSTANCE_ID`` it will be replaced with
the id of the current instance. Either all data can be posted or a list of
@@ -26,7 +43,9 @@ keys to post. Available keys are:
Data is sent as ``x-www-form-urlencoded`` arguments.
-**Example HTTP POST**::
+**Example HTTP POST**:
+
+.. code-block:: http
POST / HTTP/1.1
Content-Length: 1337
@@ -36,39 +55,42 @@ Data is sent as ``x-www-form-urlencoded`` arguments.
Content-Type: application/x-www-form-urlencoded
pub_key_dsa=dsa_contents&pub_key_rsa=rsa_contents&pub_key_ecdsa=ecdsa_contents&pub_key_ed25519=ed25519_contents&instance_id=i-87018aed&hostname=myhost&fqdn=myhost.internal
-
-**Internal name:** ``cc_phone_home``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- phone_home:
- url: http://example.com/$INSTANCE_ID/
- post:
- - pub_key_dsa
- - instance_id
- - fqdn
- tries: 10
"""
-from cloudinit import templater, url_helper, util
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-POST_LIST_ALL = [
- "pub_key_dsa",
- "pub_key_rsa",
- "pub_key_ecdsa",
- "pub_key_ed25519",
- "instance_id",
- "hostname",
- "fqdn",
-]
-
+meta: MetaSchema = {
+ "id": "cc_phone_home",
+ "name": "Phone Home",
+ "title": "Post data to url",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ phone_home:
+ url: http://example.com/$INSTANCE_ID/
+ post: all
+ """
+ ),
+ dedent(
+ """\
+ phone_home:
+ url: http://example.com/$INSTANCE_ID/
+ post:
+ - pub_key_dsa
+ - pub_key_rsa
+ - pub_key_ecdsa
+ - pub_key_ed25519
+ - instance_id
+ - hostname
+ - fqdn
+ tries: 5
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
# phone_home:
# url: http://my.foo.bar/$INSTANCE/
@@ -80,6 +102,8 @@ POST_LIST_ALL = [
# post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id, hostname,
# fqdn ]
#
+
+
def handle(name, cfg, cloud, log, args):
if len(args) != 0:
ph_cfg = util.read_conf(args[0])
@@ -105,8 +129,8 @@ def handle(name, cfg, cloud, log, args):
post_list = ph_cfg.get("post", "all")
tries = ph_cfg.get("tries")
try:
- tries = int(tries)
- except Exception:
+ tries = int(tries) # type: ignore
+ except ValueError:
tries = 10
util.logexc(
log,
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index d4eb68c0..7fc4e5ca 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -4,67 +4,78 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Power State Change
-------------------
-**Summary:** change power state
-
-This module handles shutdown/reboot after all config modules have been run. By
-default it will take no action, and the system will keep running unless a
-package installation/upgrade requires a system reboot (e.g. installing a new
-kernel) and ``package_reboot_if_required`` is true. The ``power_state`` config
-key accepts a dict of options. If ``mode`` is any value other than
-``poweroff``, ``halt``, or ``reboot``, then no action will be taken.
-
-The system
-can be shutdown before cloud-init has finished using the ``timeout`` option.
-The ``delay`` key specifies a duration to be added onto any shutdown command
-used. Therefore, if a 5 minute delay and a 120 second shutdown are specified,
-the maximum amount of time between cloud-init starting and the system shutting
-down is 7 minutes, and the minimum amount of time is 5 minutes. The ``delay``
-key must have an argument in either the form ``'+5'`` for 5 minutes or ``now``
-for immediate shutdown.
-
-Optionally, a command can be run to determine whether or not
-the system should shut down. The command to be run should be specified in the
-``condition`` key. For command formatting, see the documentation for
-``cc_runcmd``. The specified shutdown behavior will only take place if the
-``condition`` key is omitted or the command specified by the ``condition``
-key returns 0.
-
-.. note::
- With Alpine Linux any message value specified is ignored as Alpine's halt,
- poweroff, and reboot commands do not support broadcasting a message.
-
-**Internal name:** ``cc_power_state_change``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- power_state:
- delay: <now/'+minutes'>
- mode: <poweroff/halt/reboot>
- message: <shutdown message>
- timeout: <seconds>
- condition: <true/false/command>
-"""
+"""Power State Change: Change power state"""
import errno
import os
import re
import subprocess
import time
+from textwrap import dedent
from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
EXIT_FAIL = 254
+MODULE_DESCRIPTION = """\
+This module handles shutdown/reboot after all config modules have been run. By
+default it will take no action, and the system will keep running unless a
+package installation/upgrade requires a system reboot (e.g. installing a new
+kernel) and ``package_reboot_if_required`` is true.
+
+Using this module ensures that cloud-init is entirely finished with
+modules that would be executed.
+
+An example to distinguish delay from timeout:
+
+If you delay 5 (5 minutes) and have a timeout of
+120 (2 minutes), then the max time until shutdown will be 7 minutes, though
+it could be as soon as 5 minutes. Cloud-init will invoke 'shutdown +5' after
+the process finishes, or when 'timeout' seconds have elapsed.
+
+.. note::
+ With Alpine Linux any message value specified is ignored as Alpine's halt,
+ poweroff, and reboot commands do not support broadcasting a message.
+
+"""
+
+meta: MetaSchema = {
+ "id": "cc_power_state_change",
+ "name": "Power State Change",
+ "title": "Change power state",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ power_state:
+ delay: now
+ mode: poweroff
+ message: Powering off
+ timeout: 2
+ condition: true
+ """
+ ),
+ dedent(
+ """\
+ power_state:
+ delay: 30
+ mode: reboot
+ message: Rebooting machine
+ condition: test -f /var/tmp/reboot_me
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
+
def givecmdline(pid):
# Returns the cmdline for the given process id. In Linux we can use procfs
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index f51f49bc..c0b073b5 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -6,20 +6,30 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Puppet
-------
-**Summary:** install, configure and start puppet
+"""Puppet: Install, configure and start puppet"""
+
+import os
+import socket
+from io import StringIO
+from textwrap import dedent
+
+import yaml
+from cloudinit import helpers, subp, temp_utils, url_helper, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+
+AIO_INSTALL_URL = "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh" # noqa: E501
+PUPPET_AGENT_DEFAULT_ARGS = ["--test"]
+
+MODULE_DESCRIPTION = """\
This module handles puppet installation and configuration. If the ``puppet``
key does not exist in global configuration, no action will be taken. If a
config entry for ``puppet`` is present, then by default the latest version of
-puppet will be installed. If ``install`` is set to ``false``, puppet will not
-be installed. However, this will result in an error if puppet is not already
-present on the system. The version of puppet to be installed can be specified
-under ``version``, and defaults to ``none``, which selects the latest version
-in the repos. If the ``puppet`` config key exists in the config archive, this
-module will attempt to start puppet even if no installation was performed.
+puppet will be installed. If the ``puppet`` config key exists in the config
+archive, this module will attempt to start puppet even if no installation was
+performed.
The module also provides keys for configuring the new puppet 4 paths and
installing the puppet package from the puppetlabs repositories:
@@ -28,94 +38,69 @@ The keys are ``package_name``, ``conf_file``, ``ssl_dir`` and
``csr_attributes_path``. If unset, their values will default to
ones that work with puppet 3.x and with distributions that ship modified
puppet 4.x that uses the old paths.
-
-Agent packages from the puppetlabs repositories can be installed by setting
-``install_type`` to ``aio``. Based on this setting, the default config/SSL/CSR
-paths will be adjusted accordingly. To maintain backwards compatibility this
-setting defaults to ``packages`` which will install puppet from the distro
-packages.
-
-If installing ``aio`` packages, ``collection`` can also be set to one of
-``puppet`` (rolling release), ``puppet6``, ``puppet7`` (or their nightly
-counterparts) in order to install specific release streams. By default, the
-puppetlabs repository will be purged after installation finishes; set
-``cleanup`` to ``false`` to prevent this. AIO packages are installed through a
-shell script which is downloaded on the machine and then executed; the path to
-this script can be overridden using the ``aio_install_url`` key.
-
-Puppet configuration can be specified under the ``conf`` key. The
-configuration is specified as a dictionary containing high-level ``<section>``
-keys and lists of ``<key>=<value>`` pairs within each section. Each section
-name and ``<key>=<value>`` pair is written directly to ``puppet.conf``. As
-such, section names should be one of: ``main``, ``server``, ``agent`` or
-``user`` and keys should be valid puppet configuration options. The
-``certname`` key supports string substitutions for ``%i`` and ``%f``,
-corresponding to the instance id and fqdn of the machine respectively.
-If ``ca_cert`` is present, it will not be written to ``puppet.conf``, but
-instead will be used as the puppetserver certificate. It should be specified
-in pem format as a multi-line string (using the ``|`` yaml notation).
-
-Additionally it's possible to create a ``csr_attributes.yaml`` file for CSR
-attributes and certificate extension requests.
-See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html
-
-By default, the puppet service will be automatically enabled after installation
-and set to automatically start on boot. To override this in favor of manual
-puppet execution set ``start_service`` to ``false``.
-
-A single manual run can be triggered by setting ``exec`` to ``true``, and
-additional arguments can be passed to ``puppet agent`` via the ``exec_args``
-key (by default the agent will execute with the ``--test`` flag).
-
-**Internal name:** ``cc_puppet``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- puppet:
- install: <true/false>
- version: <version>
- collection: <aio collection>
- install_type: <packages/aio>
- aio_install_url: 'https://git.io/JBhoQ'
- cleanup: <true/false>
- conf_file: '/etc/puppet/puppet.conf'
- ssl_dir: '/var/lib/puppet/ssl'
- csr_attributes_path: '/etc/puppet/csr_attributes.yaml'
- package_name: 'puppet'
- exec: <true/false>
- exec_args: ['--test']
- start_service: <true/false>
- conf:
- agent:
- server: "puppetserver.example.org"
- certname: "%i.%f"
- ca_cert: |
- -------BEGIN CERTIFICATE-------
- <cert data>
- -------END CERTIFICATE-------
- csr_attributes:
- custom_attributes:
- 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290
- extension_requests:
- pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E
- pp_image_name: my_ami_image
- pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290
"""
-import os
-import socket
-from io import StringIO
-
-import yaml
-
-from cloudinit import helpers, subp, temp_utils, url_helper, util
-
-AIO_INSTALL_URL = "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh" # noqa: E501
-PUPPET_AGENT_DEFAULT_ARGS = ["--test"]
+meta: MetaSchema = {
+ "id": "cc_puppet",
+ "name": "Puppet",
+ "title": "Install, configure and start puppet",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ puppet:
+ install: true
+ version: "7.7.0"
+ install_type: "aio"
+ collection: "puppet7"
+ aio_install_url: 'https://git.io/JBhoQ'
+ cleanup: true
+ conf_file: "/etc/puppet/puppet.conf"
+ ssl_dir: "/var/lib/puppet/ssl"
+ csr_attributes_path: "/etc/puppet/csr_attributes.yaml"
+ exec: true
+ exec_args: ['--test']
+ conf:
+ agent:
+ server: "puppetserver.example.org"
+ certname: "%i.%f"
+ ca_cert: |
+ -----BEGIN CERTIFICATE-----
+ MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
+ Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
+ MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
+ b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
+ 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
+ qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
+ T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
+ BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
+ SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
+ +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
+ hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
+ -----END CERTIFICATE-----
+ csr_attributes:
+ custom_attributes:
+ 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290
+ extension_requests:
+ pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E
+ pp_image_name: my_ami_image
+ pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290
+ """ # noqa: E501
+ ),
+ dedent(
+ """\
+ puppet:
+ install_type: "packages"
+ package_name: "puppet"
+ exec: false
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
class PuppetConstants(object):
@@ -142,10 +127,8 @@ def _autostart_puppet(log):
],
capture=False,
)
- elif os.path.exists("/bin/systemctl"):
- subp.subp(
- ["/bin/systemctl", "enable", "puppet.service"], capture=False
- )
+ elif subp.which("systemctl"):
+ subp.subp(["systemctl", "enable", "puppet.service"], capture=False)
elif os.path.exists("/sbin/chkconfig"):
subp.subp(["/sbin/chkconfig", "puppet", "on"], capture=False)
else:
diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py
index 87be5348..3ed5612b 100644
--- a/cloudinit/config/cc_refresh_rmc_and_interface.py
+++ b/cloudinit/config/cc_refresh_rmc_and_interface.py
@@ -4,11 +4,18 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Refresh IPv6 interface and RMC
-------------------------------
-**Summary:** Ensure Network Manager is not managing IPv6 interface
+"""Refresh IPv6 interface and RMC:
+Ensure Network Manager is not managing IPv6 interface"""
+
+import errno
+from cloudinit import log as logging
+from cloudinit import netinfo, subp, util
+from cloudinit.config.schema import MetaSchema
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_ALWAYS
+
+MODULE_DESCRIPTION = """\
This module is IBM PowerVM Hypervisor specific
Reliable Scalable Cluster Technology (RSCT) is a set of software components
@@ -25,22 +32,20 @@ This module handles
- Refreshing RMC
- Disabling NetworkManager from handling IPv6 interface, as IPv6 interface
is used for communication between RMC daemon and PowerVM hypervisor.
-
-**Internal name:** ``cc_refresh_rmc_and_interface``
-
-**Module frequency:** always
-
-**Supported distros:** RHEL
-
"""
-import errno
-
-from cloudinit import log as logging
-from cloudinit import netinfo, subp, util
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
+meta: MetaSchema = {
+ "id": "cc_refresh_rmc_and_interface",
+ "name": "Refresh IPv6 Interface and RMC",
+ "title": "Ensure Network Manager is not managing IPv6 interface",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_ALWAYS,
+ "examples": [],
+}
+
+# This module is undocumented in our schema docs
+__doc__ = ""
LOG = logging.getLogger(__name__)
# Ensure that /opt/rsct/bin has been added to standard PATH of the
diff --git a/cloudinit/config/cc_reset_rmc.py b/cloudinit/config/cc_reset_rmc.py
index 3b929903..57f024ef 100644
--- a/cloudinit/config/cc_reset_rmc.py
+++ b/cloudinit/config/cc_reset_rmc.py
@@ -3,13 +3,18 @@
# Author: Aman Kumar Sinha <amansi26@in.ibm.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Reset RMC: Reset rsct node id"""
-"""
-Reset RMC
-------------
-**Summary:** reset rsct node id
+import os
+
+from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
Reset RMC module is IBM PowerVM Hypervisor specific
Reliable Scalable Cluster Technology (RSCT) is a set of software components,
@@ -28,21 +33,20 @@ This module handles
In order to do so, it restarts RSCT service.
Prerequisite of using this module is to install RSCT packages.
-
-**Internal name:** ``cc_reset_rmc``
-
-**Module frequency:** per instance
-
-**Supported distros:** rhel, sles and ubuntu
-
"""
-import os
-
-from cloudinit import log as logging
-from cloudinit import subp, util
-from cloudinit.settings import PER_INSTANCE
-frequency = PER_INSTANCE
+meta: MetaSchema = {
+ "id": "cc_reset_rmc",
+ "name": "Reset RMC",
+ "title": "reset rsct node id",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [],
+}
+
+# This module is undocumented in our schema docs
+__doc__ = ""
# RMCCTRL is expected to be in system PATH (/opt/rsct/bin)
# The symlink for RMCCTRL and RECFGCT are
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 19b923a8..39da1b5a 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -14,18 +14,12 @@ import stat
from textwrap import dedent
from cloudinit import subp, util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_ALWAYS
NOBLOCK = "noblock"
-frequency = PER_ALWAYS
-distros = ["all"]
-
meta: MetaSchema = {
"id": "cc_resizefs",
"name": "Resizefs",
@@ -39,30 +33,18 @@ meta: MetaSchema = {
partition and will block the boot process while the resize command is
running. Optionally, the resize operation can be performed in the
background while cloud-init continues running modules. This can be
- enabled by setting ``resize_rootfs`` to ``true``. This module can be
+ enabled by setting ``resize_rootfs`` to ``noblock``. This module can be
disabled altogether by setting ``resize_rootfs`` to ``false``."""
),
- "distros": distros,
+ "distros": [ALL_DISTROS],
"examples": [
- "resize_rootfs: false # disable root filesystem resize operation"
+ "resize_rootfs: false # disable root filesystem resize operation",
+ "resize_rootfs: noblock # runs resize operation in the background",
],
"frequency": PER_ALWAYS,
}
-schema = {
- "type": "object",
- "properties": {
- "resize_rootfs": {
- "enum": [True, False, NOBLOCK],
- "description": dedent(
- """\
- Whether to resize the root partition. Default: 'true'"""
- ),
- }
- },
-}
-
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
+__doc__ = get_meta_doc(meta)
def _resize_btrfs(mount_point, devpth):
@@ -229,7 +211,6 @@ def handle(name, cfg, _cloud, log, args):
resize_root = args[0]
else:
resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
- validate_cloudconfig_schema(cfg, schema)
if not util.translate_bool(resize_root, addons=[NOBLOCK]):
log.debug("Skipping module named %s, resizing disabled", name)
return
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index b2970d51..bbf68079 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -6,18 +6,38 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Resolv Conf
------------
-**Summary:** configure resolv.conf
+"""Resolv Conf: configure resolv.conf"""
+
+from textwrap import dedent
+from cloudinit import log as logging
+from cloudinit import templater, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+RESOLVE_CONFIG_TEMPLATE_MAP = {
+ "/etc/resolv.conf": "resolv.conf",
+ "/etc/systemd/resolved.conf": "systemd.resolved.conf",
+}
+
+MODULE_DESCRIPTION = """\
This module is intended to manage resolv.conf in environments where early
configuration of resolv.conf is necessary for further bootstrapping and/or
-where configuration management such as puppet or chef own dns configuration.
+where configuration management such as puppet or chef own DNS configuration.
As Debian/Ubuntu will, by default, utilize resolvconf, and similarly Red Hat
will use sysconfig, this module is likely to be of little use unless those
are configured correctly.
+When using a :ref:`datasource_config_drive` and a RHEL-like system,
+resolv.conf will also be managed automatically due to the available
+information provided for DNS servers in the :ref:`network_config_v2` format.
+For those who wish to have different settings, use this module.
+
+In order for the ``resolv_conf`` section to be applied, ``manage_resolv_conf``
+must be set ``true``.
+
.. note::
For Red Hat with sysconfig, be sure to set PEERDNS=no for all DHCP
enabled NICs.
@@ -25,42 +45,40 @@ are configured correctly.
.. note::
And, in Ubuntu/Debian it is recommended that DNS be configured via the
standard /etc/network/interfaces configuration file.
-
-**Internal name:** ``cc_resolv_conf``
-
-**Module frequency:** per instance
-
-**Supported distros:** alpine, fedora, photon, rhel, sles
-
-**Config keys**::
-
- manage_resolv_conf: <true/false>
- resolv_conf:
- nameservers: ['8.8.4.4', '8.8.8.8']
- searchdomains:
- - foo.example.com
- - bar.example.com
- domain: example.com
- options:
- rotate: <true/false>
- timeout: 1
"""
-from cloudinit import log as logging
-from cloudinit import templater, util
-from cloudinit.settings import PER_INSTANCE
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-
-distros = ["alpine", "fedora", "opensuse", "photon", "rhel", "sles"]
-
-RESOLVE_CONFIG_TEMPLATE_MAP = {
- "/etc/resolv.conf": "resolv.conf",
- "/etc/systemd/resolved.conf": "systemd.resolved.conf",
+meta: MetaSchema = {
+ "id": "cc_resolv_conf",
+ "name": "Resolv Conf",
+ "title": "Configure resolv.conf",
+ "description": MODULE_DESCRIPTION,
+ "distros": ["alpine", "fedora", "opensuse", "photon", "rhel", "sles"],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ manage_resolv_conf: true
+ resolv_conf:
+ nameservers:
+ - 8.8.8.8
+ - 8.8.4.4
+ searchdomains:
+ - foo.example.com
+ - bar.example.com
+ domain: example.com
+ sortlist:
+ - 10.0.0.1/255
+ - 10.0.0.2
+ options:
+ rotate: true
+ timeout: 1
+ """
+ )
+ ],
}
+__doc__ = get_meta_doc(meta)
+
def generate_resolv_conf(template_fn, params, target_fname):
flags = []
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index b81a7a9b..b742cb95 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -3,47 +3,77 @@
# Author: Brent Baude <bbaude@redhat.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Red Hat Subscription: Register Red Hat Enterprise Linux based system"""
-"""
-Red Hat Subscription
---------------------
-**Summary:** register red hat enterprise linux based system
-
-Register a Red Hat system either by username and password *or* activation and
-org. Following a sucessful registration, you can auto-attach subscriptions, set
-the service level, add subscriptions based on pool id, enable/disable yum
-repositories based on repo id, and alter the rhsm_baseurl and server-hostname
-in ``/etc/rhsm/rhs.conf``. For more details, see the ``Register Red Hat
-Subscription`` example config.
-
-**Internal name:** ``cc_rh_subscription``
-
-**Module frequency:** per instance
-
-**Supported distros:** rhel, fedora
-
-**Config keys**::
-
- rh_subscription:
- username: <username>
- password: <password>
- activation-key: <activation key>
- org: <org number>
- auto-attach: <true/false>
- service-level: <service level>
- add-pool: <list of pool ids>
- enable-repo: <list of yum repo ids>
- disable-repo: <list of yum repo ids>
- rhsm-baseurl: <url>
- server-hostname: <hostname>
-"""
+from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
-distros = ["fedora", "rhel"]
+MODULE_DESCRIPTION = """\
+Register a Red Hat system either by username and password *or* activation and
+org. Following a successful registration, you can:
+
+ - auto-attach subscriptions
+ - set the service level
+ - add subscriptions based on pool id
+ - enable/disable yum repositories based on repo id
+ - alter the rhsm_baseurl and server-hostname in ``/etc/rhsm/rhs.conf``.
+"""
+
+meta: MetaSchema = {
+ "id": "cc_rh_subscription",
+ "name": "Red Hat Subscription",
+ "title": "Register Red Hat Enterprise Linux based system",
+ "description": MODULE_DESCRIPTION,
+ "distros": ["fedora", "rhel"],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ rh_subscription:
+ username: joe@foo.bar
+ ## Quote your password if it has symbols to be safe
+ password: '1234abcd'
+ """
+ ),
+ dedent(
+ """\
+ rh_subscription:
+ activation-key: foobar
+ org: 12345
+ """
+ ),
+ dedent(
+ """\
+ rh_subscription:
+ activation-key: foobar
+ org: 12345
+ auto-attach: true
+ service-level: self-support
+ add-pool:
+ - 1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a
+ - 2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b
+ enable-repo:
+ - repo-id-to-enable
+ - other-repo-id-to-enable
+ disable-repo:
+ - repo-id-to-disable
+ - other-repo-id-to-disable
+ # Alter the baseurl in /etc/rhsm/rhsm.conf
+ rhsm-baseurl: http://url
+ # Alter the server hostname in /etc/rhsm/rhsm.conf
+ server-hostname: foo.bar.com
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, _cloud, log, _args):
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index 36a009a2..c1b0f8bd 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -6,13 +6,23 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Rightscale Userdata
--------------------
-**Summary:** support rightscale configuration hooks
+import os
+from urllib.parse import parse_qs
+
+from cloudinit import url_helper as uhelp
+from cloudinit import util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+MY_NAME = "cc_rightscale_userdata"
+MY_HOOKNAME = "CLOUD_INIT_REMOTE_HOOK"
+
+"""Rightscale Userdata: Support rightscale configuration hooks"""
+
+MODULE_DESCRIPTION = """\
This module adds support for RightScale configuration hooks to cloud-init.
-RightScale adds a entry in the format ``CLOUD_INIT_REMOTE_HOOK=http://...`` to
+RightScale adds an entry in the format ``CLOUD_INIT_REMOTE_HOOK=http://...`` to
ec2 user-data. This module checks for this line in the raw userdata and
retrieves any scripts linked by the RightScale user data and places them in the
user scripts configuration directory, to be run later by ``cc_scripts_user``.
@@ -21,17 +31,23 @@ user scripts configuration directory, to be run later by ``cc_scripts_user``.
the ``CLOUD_INIT_REMOTE_HOOK`` config variable is present in the raw ec2
user data only, not in any cloud-config parts
-**Internal name:** ``cc_rightscale_userdata``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
+**Raw user data schema**::
CLOUD_INIT_REMOTE_HOOK=<url>
"""
+meta: MetaSchema = {
+ "id": "cc_rightscale_userdata",
+ "name": "RightScale Userdata",
+ "title": "Support rightscale configuration hooks",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [],
+}
+
+__doc__ = get_meta_doc(meta)
+
#
# The purpose of this script is to allow cloud-init to consume
# rightscale style userdata. rightscale user data is key-value pairs
@@ -49,18 +65,6 @@ user scripts configuration directory, to be run later by ``cc_scripts_user``.
#
#
-import os
-from urllib.parse import parse_qs
-
-from cloudinit import url_helper as uhelp
-from cloudinit import util
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-MY_NAME = "cc_rightscale_userdata"
-MY_HOOKNAME = "CLOUD_INIT_REMOTE_HOOK"
-
def handle(name, _cfg, cloud, log, _args):
try:
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index db2a3c79..57b8aa62 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -6,183 +6,63 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-.. _cc_rsyslog:
+"""Rsyslog: Configure system logging via rsyslog"""
-Rsyslog
--------
-**Summary:** configure system logging via rsyslog
+import os
+import re
+from textwrap import dedent
-This module configures remote system logging using rsyslog.
+from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
-The rsyslog config file to write to can be specified in ``config_filename``,
-which defaults to ``20-cloud-config.conf``. The rsyslog config directory to
-write config files to may be specified in ``config_dir``, which defaults to
-``/etc/rsyslog.d``.
-
-A list of configurations for rsyslog can be specified under the ``configs`` key
-in the ``rsyslog`` config. Each entry in ``configs`` is either a string or a
-dictionary. Each config entry contains a configuration string and a file to
-write it to. For config entries that are a dictionary, ``filename`` sets the
-target filename and ``content`` specifies the config string to write. For
-config entries that are only a string, the string is used as the config string
-to write. If the filename to write the config to is not specified, the value of
-the ``config_filename`` key is used. A file with the selected filename will be
-written inside the directory specified by ``config_dir``.
-
-The command to use to reload the rsyslog service after the config has been
-updated can be specified in ``service_reload_command``. If this is set to
-``auto``, then an appropriate command for the distro will be used. This is the
-default behavior. To manually set the command, use a list of command args (e.g.
-``[systemctl, restart, rsyslog]``).
+MODULE_DESCRIPTION = """\
+This module configures remote system logging using rsyslog.
Configuration for remote servers can be specified in ``configs``, but for
-convenience it can be specified as key value pairs in ``remotes``. Each key
-is the name for an rsyslog remote entry. Each value holds the contents of the
-remote config for rsyslog. The config consists of the following parts:
-
- - filter for log messages (defaults to ``*.*``)
- - optional leading ``@`` or ``@@``, indicating udp and tcp respectively
- (defaults to ``@``, for udp)
- - ipv4 or ipv6 hostname or address. ipv6 addresses must be in ``[::1]``
- format, (e.g. ``@[fd00::1]:514``)
- - optional port number (defaults to ``514``)
-
-This module will provide sane defaults for any part of the remote entry that is
-not specified, so in most cases remote hosts can be specified just using
-``<name>: <address>``.
-
-For backwards compatibility, this module still supports legacy names for the
-config entries. Legacy to new mappings are as follows:
-
- - ``rsyslog`` -> ``rsyslog/configs``
- - ``rsyslog_filename`` -> ``rsyslog/config_filename``
- - ``rsyslog_dir`` -> ``rsyslog/config_dir``
-
-.. note::
- The legacy config format does not support specifying
- ``service_reload_command``.
-
-**Internal name:** ``cc_rsyslog``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- rsyslog:
- config_dir: config_dir
- config_filename: config_filename
- configs:
- - "*.* @@192.158.1.1"
- - content: "*.* @@192.0.2.1:10514"
- filename: 01-example.conf
- - content: |
- *.* @@syslogd.example.com
- remotes:
- maas: "192.168.1.1"
- juju: "10.0.4.1"
- service_reload_command: [your, syslog, restart, command]
-
-**Legacy config keys**::
-
- rsyslog:
- - "*.* @@192.158.1.1"
- rsyslog_dir: /etc/rsyslog-config.d/
- rsyslog_filename: 99-local.conf
+convenience it can be specified as key value pairs in ``remotes``.
"""
-# Old rsyslog documentation, kept for reference:
-#
-# rsyslog module allows configuration of syslog logging via rsyslog
-# Configuration is done under the cloud-config top level 'rsyslog'.
-#
-# Under 'rsyslog' you can define:
-# - configs: [default=[]]
-# this is a list. entries in it are a string or a dictionary.
-# each entry has 2 parts:
-# * content
-# * filename
-# if the entry is a string, then it is assigned to 'content'.
-# for each entry, content is written to the provided filename.
-# if filename is not provided, its default is read from 'config_filename'
-#
-# Content here can be any valid rsyslog configuration. No format
-# specific format is enforced.
-#
-# For simply logging to an existing remote syslog server, via udp:
-# configs: ["*.* @192.168.1.1"]
-#
-# - remotes: [default={}]
-# This is a dictionary of name / value pairs.
-# In comparison to 'config's, it is more focused in that it only supports
-# remote syslog configuration. It is not rsyslog specific, and could
-# convert to other syslog implementations.
-#
-# Each entry in remotes is a 'name' and a 'value'.
-# * name: an string identifying the entry. good practice would indicate
-# using a consistent and identifiable string for the producer.
-# For example, the MAAS service could use 'maas' as the key.
-# * value consists of the following parts:
-# * optional filter for log messages
-# default if not present: *.*
-# * optional leading '@' or '@@' (indicates udp or tcp respectively).
-# default if not present (udp): @
-# This is rsyslog format for that. if not present, is '@'.
-# * ipv4 or ipv6 or hostname
-# ipv6 addresses must be in [::1] format. (@[fd00::1]:514)
-# * optional port
-# port defaults to 514
-#
-# - config_filename: [default=20-cloud-config.conf]
-# this is the file name to use if none is provided in a config entry.
-#
-# - config_dir: [default=/etc/rsyslog.d]
-# this directory is used for filenames that are not absolute paths.
-#
-# - service_reload_command: [default="auto"]
-# this command is executed if files have been written and thus the syslog
-# daemon needs to be told.
-#
-# Note, since cloud-init 0.5 a legacy version of rsyslog config has been
-# present and is still supported. See below for the mappings between old
-# value and new value:
-# old value -> new value
-# 'rsyslog' -> rsyslog/configs
-# 'rsyslog_filename' -> rsyslog/config_filename
-# 'rsyslog_dir' -> rsyslog/config_dir
-#
-# the legacy config does not support 'service_reload_command'.
-#
-# Example config:
-# #cloud-config
-# rsyslog:
-# configs:
-# - "*.* @@192.158.1.1"
-# - content: "*.* @@192.0.2.1:10514"
-# filename: 01-example.conf
-# - content: |
-# *.* @@syslogd.example.com
-# remotes:
-# maas: "192.168.1.1"
-# juju: "10.0.4.1"
-# config_dir: config_dir
-# config_filename: config_filename
-# service_reload_command: [your, syslog, restart, command]
-#
-# Example Legacy config:
-# #cloud-config
-# rsyslog:
-# - "*.* @@192.158.1.1"
-# rsyslog_dir: /etc/rsyslog-config.d/
-# rsyslog_filename: 99-local.conf
-
-import os
-import re
-
-from cloudinit import log as logging
-from cloudinit import subp, util
+meta: MetaSchema = {
+ "id": "cc_rsyslog",
+ "name": "Rsyslog",
+ "title": "Configure system logging via rsyslog",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ rsyslog:
+ remotes:
+ maas: 192.168.1.1
+ juju: 10.0.4.1
+ service_reload_command: auto
+ """
+ ),
+ dedent(
+ """\
+ rsyslog:
+ config_dir: /opt/etc/rsyslog.d
+ config_filename: 99-late-cloud-config.conf
+ configs:
+ - "*.* @@192.158.1.1"
+ - content: "*.* @@192.0.2.1:10514"
+ filename: 01-example.conf
+ - content: |
+ *.* @@syslogd.example.com
+ remotes:
+ maas: 192.168.1.1
+ juju: 10.0.4.1
+ service_reload_command: [your, syslog, restart, command]
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
DEF_FILENAME = "20-cloud-config.conf"
DEF_DIR = "/etc/rsyslog.d"
@@ -214,12 +94,19 @@ def reload_syslog(distro, command=DEF_RELOAD):
return subp.subp(command, capture=True)
-def load_config(cfg):
- # return an updated config with entries of the correct type
- # support converting the old top level format into new format
+def load_config(cfg: dict) -> dict:
+ """Return an updated config.
+
+ Support converting the old top level format into new format.
+ Raise a `ValueError` if some top level entry has an incorrect type.
+ """
mycfg = cfg.get("rsyslog", {})
if isinstance(cfg.get("rsyslog"), list):
+ LOG.warning(
+ "DEPRECATION: This rsyslog list format is deprecated and will be "
+ "removed in a future version of cloud-init. Use documented keys."
+ )
mycfg = {KEYNAME_CONFIGS: cfg.get("rsyslog")}
if KEYNAME_LEGACY_FILENAME in cfg:
mycfg[KEYNAME_FILENAME] = cfg[KEYNAME_LEGACY_FILENAME]
@@ -235,8 +122,13 @@ def load_config(cfg):
)
for key, default, vtypes in fillup:
- if key not in mycfg or not isinstance(mycfg[key], vtypes):
+ if key not in mycfg:
mycfg[key] = default
+ elif not isinstance(mycfg[key], vtypes):
+ raise ValueError(
+ f"Invalid type for key `{key}`. Expected type(s): {vtypes}. "
+ f"Current type: {type(mycfg[key])}"
+ )
return mycfg
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index c5206003..7c614f57 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -12,11 +12,7 @@ import os
from textwrap import dedent
from cloudinit import util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
@@ -26,36 +22,36 @@ from cloudinit.settings import PER_INSTANCE
# configuration options before actually attempting to deploy with said
# configuration.
-distros = [ALL_DISTROS]
-meta: MetaSchema = {
- "id": "cc_runcmd",
- "name": "Runcmd",
- "title": "Run arbitrary commands",
- "description": dedent(
- """\
- Run arbitrary commands at a rc.local like level with output to the
- console. Each item can be either a list or a string. If the item is a
- list, it will be properly quoted. Each item is written to
- ``/var/lib/cloud/instance/runcmd`` to be later interpreted using
- ``sh``.
+MODULE_DESCRIPTION = """\
+Run arbitrary commands at a rc.local like level with output to the
+console. Each item can be either a list or a string. If the item is a
+list, it will be properly quoted. Each item is written to
+``/var/lib/cloud/instance/runcmd`` to be later interpreted using
+``sh``.
- Note that the ``runcmd`` module only writes the script to be run
- later. The module that actually runs the script is ``scripts-user``
- in the :ref:`Final` boot stage.
+Note that the ``runcmd`` module only writes the script to be run
+later. The module that actually runs the script is ``scripts-user``
+in the :ref:`topics/boot:Final` boot stage.
- .. note::
+.. note::
- all commands must be proper yaml, so you have to quote any characters
- yaml would eat (':' can be problematic)
+ all commands must be proper yaml, so you have to quote any characters
+ yaml would eat (':' can be problematic)
- .. note::
+.. note::
- when writing files, do not use /tmp dir as it races with
- systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead.
- """
- ),
- "distros": distros,
+ when writing files, do not use /tmp dir as it races with
+ systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead.
+"""
+
+meta: MetaSchema = {
+ "id": "cc_runcmd",
+ "name": "Runcmd",
+ "title": "Run arbitrary commands",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
"examples": [
dedent(
"""\
@@ -68,29 +64,9 @@ meta: MetaSchema = {
"""
)
],
- "frequency": PER_INSTANCE,
-}
-
-schema = {
- "type": "object",
- "properties": {
- "runcmd": {
- "type": "array",
- "items": {
- "oneOf": [
- {"type": "array", "items": {"type": "string"}},
- {"type": "string"},
- {"type": "null"},
- ]
- },
- "additionalItems": False, # Reject items of non-string non-list
- "additionalProperties": False,
- "minItems": 1,
- }
- },
}
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, _args):
@@ -100,7 +76,6 @@ def handle(name, cfg, cloud, log, _args):
)
return
- validate_cloudconfig_schema(cfg, schema)
out_fn = os.path.join(cloud.get_ipath("scripts"), "runcmd")
cmd = cfg["runcmd"]
try:
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index 0eb46664..df9d4205 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -2,11 +2,17 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Salt Minion
------------
-**Summary:** set up and run salt minion
+"""Salt Minion: Setup and run salt minion"""
+
+import os
+from textwrap import dedent
+
+from cloudinit import safeyaml, subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS, bsd_utils
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
This module installs, configures and starts salt minion. If the ``salt_minion``
key is present in the config parts, then salt minion will be installed and
started. Configuration for salt minion can be specified in the ``conf`` key
@@ -16,37 +22,45 @@ specified with ``public_key`` and ``private_key`` respectively. Optionally if
you have a custom package name, service name or config directory you can
specify them with ``pkg_name``, ``service_name`` and ``config_dir``.
-**Internal name:** ``cc_salt_minion``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- salt_minion:
- pkg_name: 'salt-minion'
- service_name: 'salt-minion'
- config_dir: '/etc/salt'
- conf:
- master: salt.example.com
- grains:
- role:
- - web
- public_key: |
- ------BEGIN PUBLIC KEY-------
- <key data>
- ------END PUBLIC KEY-------
- private_key: |
- ------BEGIN PRIVATE KEY------
- <key data>
- ------END PRIVATE KEY-------
+Salt keys can be manually generated by: ``salt-key --gen-keys=GEN_KEYS``,
+where ``GEN_KEYS`` is the name of the keypair, e.g. 'minion'. The keypair
+will be copied to ``/etc/salt/pki`` on the minion instance.
"""
-import os
+meta: MetaSchema = {
+ "id": "cc_salt_minion",
+ "name": "Salt Minion",
+ "title": "Setup and run salt minion",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ salt_minion:
+ pkg_name: salt-minion
+ service_name: salt-minion
+ config_dir: /etc/salt
+ conf:
+ master: salt.example.com
+ grains:
+ role:
+ - web
+ public_key: |
+ ------BEGIN PUBLIC KEY-------
+ <key data>
+ ------END PUBLIC KEY-------
+ private_key: |
+ ------BEGIN PRIVATE KEY------
+ <key data>
+ ------END PRIVATE KEY-------
+ pki_dir: /etc/salt/pki/minion
+ """
+ )
+ ],
+}
-from cloudinit import safeyaml, subp, util
-from cloudinit.distros import bsd_utils
+__doc__ = get_meta_doc(meta)
# Note: see https://docs.saltstack.com/en/latest/topics/installation/
# Note: see https://docs.saltstack.com/en/latest/ref/configuration/
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
index b7bfb7aa..aa311d59 100644
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ b/cloudinit/config/cc_scripts_per_boot.py
@@ -5,29 +5,34 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Scripts Per Boot: Run per boot scripts"""
-"""
-Scripts Per Boot
-----------------
-**Summary:** run per boot scripts
+import os
+from cloudinit import subp
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_ALWAYS
+
+frequency = PER_ALWAYS
+MODULE_DESCRIPTION = """\
Any scripts in the ``scripts/per-boot`` directory on the datasource will be run
every time the system boots. Scripts will be run in alphabetical order. This
module does not accept any config keys.
-
-**Internal name:** ``cc_scripts_per_boot``
-
-**Module frequency:** always
-
-**Supported distros:** all
"""
-import os
-from cloudinit import subp
-from cloudinit.settings import PER_ALWAYS
+meta: MetaSchema = {
+ "id": "cc_scripts_per_boot",
+ "name": "Scripts Per Boot",
+ "title": "Run per boot scripts",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": frequency,
+ "examples": [],
+}
-frequency = PER_ALWAYS
+__doc__ = get_meta_doc(meta)
SCRIPT_SUBDIR = "per-boot"
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
index ef102b1c..1fb40717 100644
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ b/cloudinit/config/cc_scripts_per_instance.py
@@ -5,32 +5,36 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Scripts Per Instance: Run per instance scripts"""
-"""
-Scripts Per Instance
---------------------
-**Summary:** run per instance scripts
+import os
+
+from cloudinit import subp
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
Any scripts in the ``scripts/per-instance`` directory on the datasource will
be run when a new instance is first booted. Scripts will be run in alphabetical
order. This module does not accept any config keys.
Some cloud platforms change instance-id if a significant change was made to
the system. As a result per-instance scripts will run again.
-
-**Internal name:** ``cc_scripts_per_instance``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
"""
-import os
+meta: MetaSchema = {
+ "id": "cc_scripts_per_instance",
+ "name": "Scripts Per Instance",
+ "title": "Run per instance scripts",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [],
+}
-from cloudinit import subp
-from cloudinit.settings import PER_INSTANCE
+__doc__ = get_meta_doc(meta)
-frequency = PER_INSTANCE
SCRIPT_SUBDIR = "per-instance"
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
index bf4231e7..d9f406b7 100644
--- a/cloudinit/config/cc_scripts_per_once.py
+++ b/cloudinit/config/cc_scripts_per_once.py
@@ -5,30 +5,34 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Scripts Per Once: Run one time scripts"""
-"""
-Scripts Per Once
-----------------
-**Summary:** run one time scripts
+import os
+
+from cloudinit import subp
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_ONCE
+frequency = PER_ONCE
+MODULE_DESCRIPTION = """\
Any scripts in the ``scripts/per-once`` directory on the datasource will be run
only once. Changes to the instance will not force a re-run. The only way to
re-run these scripts is to run the clean subcommand and reboot. Scripts will
be run in alphabetical order. This module does not accept any config keys.
-
-**Internal name:** ``cc_scripts_per_once``
-
-**Module frequency:** per once
-
-**Supported distros:** all
"""
-import os
-
-from cloudinit import subp
-from cloudinit.settings import PER_ONCE
-
-frequency = PER_ONCE
+meta: MetaSchema = {
+ "id": "cc_scripts_per_once",
+ "name": "Scripts Per Once",
+ "title": "Run one time scripts",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": frequency,
+ "examples": [],
+}
+
+__doc__ = get_meta_doc(meta)
SCRIPT_SUBDIR = "per-once"
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
index e0d6c560..85375dac 100644
--- a/cloudinit/config/cc_scripts_user.py
+++ b/cloudinit/config/cc_scripts_user.py
@@ -5,32 +5,36 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Scripts User: Run user scripts"""
-"""
-Scripts User
-------------
-**Summary:** run user scripts
+import os
+
+from cloudinit import subp
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
This module runs all user scripts. User scripts are not specified in the
``scripts`` directory in the datasource, but rather are present in the
``scripts`` dir in the instance configuration. Any cloud-config parts with a
``#!`` will be treated as a script and run. Scripts specified as cloud-config
parts will be run in the order they are specified in the configuration.
This module does not accept any config keys.
-
-**Internal name:** ``cc_scripts_user``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
"""
-import os
+meta: MetaSchema = {
+ "id": "cc_scripts_user",
+ "name": "Scripts User",
+ "title": "Run user scripts",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [],
+}
-from cloudinit import subp
-from cloudinit.settings import PER_INSTANCE
+__doc__ = get_meta_doc(meta)
-frequency = PER_INSTANCE
SCRIPT_SUBDIR = "scripts"
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
index 1b30fa1b..894404f8 100644
--- a/cloudinit/config/cc_scripts_vendor.py
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -3,35 +3,59 @@
# Author: Ben Howard <ben.howard@canonical.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
-
-"""
-Scripts Vendor
---------------
-**Summary:** run vendor scripts
-
-Any scripts in the ``scripts/vendor`` directory in the datasource will be run
-when a new instance is first booted. Scripts will be run in alphabetical order.
-Vendor scripts can be run with an optional prefix specified in the ``prefix``
-entry under the ``vendor_data`` config key.
-
-**Internal name:** ``cc_scripts_vendor``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- vendor_data:
- prefix: <vendor data prefix>
-"""
+"""Scripts Vendor: Run vendor scripts"""
import os
+from textwrap import dedent
from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
-frequency = PER_INSTANCE
+MODULE_DESCRIPTION = """\
+On select Datasources, vendors have a channel for the consumption
+of all supported user data types via a special channel called
+vendor data. Any scripts in the ``scripts/vendor`` directory in the datasource
+will be run when a new instance is first booted. Scripts will be run in
+alphabetical order. This module allows control over the execution of
+vendor data.
+"""
+
+meta: MetaSchema = {
+ "id": "cc_scripts_vendor",
+ "name": "Scripts Vendor",
+ "title": "Run vendor scripts",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ vendor_data:
+ enabled: true
+ prefix: /usr/bin/ltrace
+ """
+ ),
+ dedent(
+ """\
+ vendor_data:
+ enabled: true
+ prefix: [timeout, 30]
+ """
+ ),
+ dedent(
+ """\
+ # Vendor data will not be processed
+ vendor_data:
+ enabled: false
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
+
SCRIPT_SUBDIR = "vendor"
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index 67ba8ef5..b0ffdd15 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -6,73 +6,72 @@
# Author: Scott Moser <scott.moser@canonical.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Seed Random: Provide random seed data"""
-"""
-Seed Random
------------
-**Summary:** provide random seed data
+import base64
+import os
+from io import BytesIO
+from textwrap import dedent
+
+from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
-Since all cloud instances started from the same image will produce very similar
-data when they are first booted, as they are all starting with the same seed
+MODULE_DESCRIPTION = """\
+All cloud instances started from the same image will produce very similar
+data when they are first booted as they are all starting with the same seed
for the kernel's entropy keyring. To avoid this, random seed data can be
provided to the instance either as a string or by specifying a command to run
to generate the data.
-Configuration for this module is under the ``random_seed`` config key. The
-``file`` key specifies the path to write the data to, defaulting to
-``/dev/urandom``. Data can be passed in directly with ``data``, and may
-optionally be specified in encoded form, with the encoding specified in
-``encoding``.
-
-If the cloud provides its own random seed data, it will be appended to ``data``
+Configuration for this module is under the ``random_seed`` config key. If
+the cloud provides its own random seed data, it will be appended to ``data``
before it is written to ``file``.
-.. note::
- when using a multiline value for ``data`` or specifying binary data, be
- sure to follow yaml syntax and use the ``|`` and ``!binary`` yaml format
- specifiers when appropriate
-
If the ``command`` key is specified, the given command will be executed. This
will happen after ``file`` has been populated. That command's environment will
contain the value of the ``file`` key as ``RANDOM_SEED_FILE``. If a command is
specified that cannot be run, no error will be reported unless
``command_required`` is set to true.
-
-For example, to use ``pollinate`` to gather data from a
-remote entropy server and write it to ``/dev/urandom``, the following could be
-used::
-
- random_seed:
- file: /dev/urandom
- command: ["pollinate", "--server=http://local.polinate.server"]
- command_required: true
-
-**Internal name:** ``cc_seed_random``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- random_seed:
- file: <file>
- data: <random string>
- encoding: <raw/base64/b64/gzip/gz>
- command: [<cmd name>, <arg1>, <arg2>...]
- command_required: <true/false>
"""
-import base64
-import os
-from io import BytesIO
-
-from cloudinit import log as logging
-from cloudinit import subp, util
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-LOG = logging.getLogger(__name__)
+meta: MetaSchema = {
+ "id": "cc_seed_random",
+ "name": "Seed Random",
+ "title": "Provide random seed data",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ random_seed:
+ file: /dev/urandom
+ data: my random string
+ encoding: raw
+ command: ['sh', '-c', 'dd if=/dev/urandom of=$RANDOM_SEED_FILE']
+ command_required: true
+ """
+ ),
+ dedent(
+ """\
+ # To use 'pollinate' to gather data from a remote entropy
+ # server and write it to '/dev/urandom', the following
+ # could be used:
+ random_seed:
+ file: /dev/urandom
+              command: ["pollinate", "--server=http://local.pollinate.server"]
+ command_required: true
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
def _decode(data, encoding=None):
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index eb0ca328..a5b989d0 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -5,24 +5,36 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Set Hostname: Set hostname and FQDN"""
-"""
-Set Hostname
-------------
-**Summary:** set hostname and fqdn
-
-This module handles setting the system hostname and fqdn. If
-``preserve_hostname`` is set, then the hostname will not be altered.
+import os
+from textwrap import dedent
-A hostname and fqdn can be provided by specifying a full domain name under the
-``fqdn`` key. Alternatively, a hostname can be specified using the ``hostname``
-key, and the fqdn of the cloud wil be used. If a fqdn specified with the
+from cloudinit import util
+from cloudinit.atomic_helper import write_json
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_ALWAYS
+
+frequency = PER_ALWAYS
+MODULE_DESCRIPTION = """\
+This module handles setting the system hostname and fully qualified domain
+name (FQDN). If ``preserve_hostname`` is set, then the hostname will not be
+altered.
+
+A hostname and FQDN can be provided by specifying a full domain name under the
+``fqdn`` key. Alternatively, a hostname can be specified using the ``hostname``
+key, and the FQDN of the cloud will be used. If a FQDN is specified with the
``hostname`` key, it will be handled properly, although it is better to use
the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set,
-it is distro dependent whether ``hostname`` or ``fqdn`` is used,
-unless the ``prefer_fqdn_over_hostname`` option is true and fqdn is set
-it will force the use of FQDN in all distros, and if false then it will
-force the hostname use.
+the ``prefer_fqdn_over_hostname`` will force the use of FQDN in all distros
+when true, and when false it will force the short hostname. Otherwise, the
+hostname to use is distro-dependent.
+
+.. note::
+ cloud-init performs no hostname input validation before sending the
+ hostname to distro-specific tools, and most tools will not accept a
+ trailing dot on the FQDN.
This module will run in the init-local stage before networking is configured
if the hostname is set by metadata or user data on the local system.
@@ -31,25 +43,28 @@ This will occur on datasources like nocloud and ovf where metadata and user
data are available locally. This ensures that the desired hostname is applied
before any DHCP requests are preformed on these platforms where dynamic DNS is
based on initial hostname.
-
-**Internal name:** ``cc_set_hostname``
-
-**Module frequency:** always
-
-**Supported distros:** all
-
-**Config keys**::
-
- preserve_hostname: <true/false>
- prefer_fqdn_over_hostname: <true/false>
- fqdn: <fqdn>
- hostname: <fqdn/hostname>
"""
-import os
-
-from cloudinit import util
-from cloudinit.atomic_helper import write_json
+meta: MetaSchema = {
+ "id": "cc_set_hostname",
+ "name": "Set Hostname",
+ "title": "Set hostname and FQDN",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": frequency,
+ "examples": [
+ "preserve_hostname: true",
+ dedent(
+ """\
+ hostname: myhost
+ fqdn: myhost.example.com
+ prefer_fqdn_over_hostname: true
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
class SetHostnameError(Exception):
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index d8df8e23..3c8b378b 100755..100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -5,85 +5,73 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Set Passwords: Set user passwords and enable/disable SSH password auth"""
-"""
-Set Passwords
--------------
-**Summary:** Set user passwords and enable/disable SSH password authentication
+import re
+from string import ascii_letters, digits
+from textwrap import dedent
+from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS, Distro, ug_util
+from cloudinit.settings import PER_INSTANCE
+from cloudinit.ssh_util import update_ssh_config
+
+MODULE_DESCRIPTION = """\
This module consumes three top-level config keys: ``ssh_pwauth``, ``chpasswd``
and ``password``.
The ``ssh_pwauth`` config key determines whether or not sshd will be configured
-to accept password authentication. True values will enable password auth,
-false values will disable password auth, and the literal string ``unchanged``
-will leave it unchanged. Setting no value will also leave the current setting
-on-disk unchanged.
+to accept password authentication.
The ``chpasswd`` config key accepts a dictionary containing either or both of
-``expire`` and ``list``.
-
-If the ``list`` key is provided, it should contain a list of
-``username:password`` pairs. This can be either a YAML list (of strings), or a
-multi-line string with one pair per line. Each user will have the
-corresponding password set. A password can be randomly generated by specifying
-``RANDOM`` or ``R`` as a user's password. A hashed password, created by a tool
-like ``mkpasswd``, can be specified; a regex
-(``r'\\$(1|2a|2y|5|6)(\\$.+){2}'``) is used to determine if a password value
-should be treated as a hash.
-
-.. note::
- The users specified must already exist on the system. Users will have been
- created by the ``cc_users_groups`` module at this point.
-
-By default, all users on the system will have their passwords expired (meaning
-that they will have to be reset the next time the user logs in). To disable
-this behaviour, set ``expire`` under ``chpasswd`` to a false value.
-
-If a ``list`` of user/password pairs is not specified under ``chpasswd``, then
-the value of the ``password`` config key will be used to set the default user's
-password.
-
-**Internal name:** ``cc_set_passwords``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- ssh_pwauth: <yes/no/unchanged>
-
- password: password1
- chpasswd:
- expire: <true/false>
-
- chpasswd:
- list: |
- user1:password1
- user2:RANDOM
- user3:password3
- user4:R
-
- ##
- # or as yaml list
- ##
- chpasswd:
- list:
- - user1:password1
- - user2:RANDOM
- - user3:password3
- - user4:R
- - user4:$6$rL..$ej...
-"""
+``list`` and ``expire``. The ``list`` key is used to assign a password to a
+corresponding pre-existing user. The ``expire`` key is used to set
+whether to expire all user passwords such that a password will need to be reset
+on the user's next login.
-import re
-from string import ascii_letters, digits
+The ``password`` config key is used to set the default user's password. It is
+ignored if the ``chpasswd`` ``list`` is used.
+"""
-from cloudinit import log as logging
-from cloudinit import subp, util
-from cloudinit.distros import ug_util
-from cloudinit.ssh_util import update_ssh_config
+meta: MetaSchema = {
+ "id": "cc_set_passwords",
+ "name": "Set Passwords",
+ "title": "Set user passwords and enable/disable SSH password auth",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ # Set a default password that would need to be changed
+ # at first login
+ ssh_pwauth: true
+ password: password1
+ """
+ ),
+ dedent(
+ """\
+ # Disable ssh password authentication
+ # Don't require users to change their passwords on next login
+ # Set the password for user1 to be 'password1' (OS does hashing)
+ # Set the password for user2 to be a randomly generated password,
+ # which will be written to the system console
+ # Set the password for user3 to a pre-hashed password
+ ssh_pwauth: false
+ chpasswd:
+ expire: false
+ list:
+ - user1:password1
+ - user2:RANDOM
+ - user3:$6$rounds=4096$5DJ8a9WMTEzIo5J4$Yms6imfeBvf3Yfu84mQBerh18l7OR1Wm1BJXZqFSpJ6BVas0AYJqIjP7czkOaAZHZi1kxQ5Y1IhgWN8K9NgxR1
+ """ # noqa
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
LOG = logging.getLogger(__name__)
@@ -91,7 +79,7 @@ LOG = logging.getLogger(__name__)
PW_SET = "".join([x for x in ascii_letters + digits if x not in "loLOI01"])
-def handle_ssh_pwauth(pw_auth, distro):
+def handle_ssh_pwauth(pw_auth, distro: Distro):
"""Apply sshd PasswordAuthentication changes.
@param pw_auth: config setting from 'pw_auth'.
@@ -99,8 +87,57 @@ def handle_ssh_pwauth(pw_auth, distro):
@param distro: an instance of the distro class for the target distribution
@return: None"""
+ service = distro.get_option("ssh_svcname", "ssh")
+ restart_ssh = True
+ try:
+ distro.manage_service("status", service)
+ except subp.ProcessExecutionError as e:
+ uses_systemd = distro.uses_systemd()
+ if not uses_systemd:
+ LOG.debug(
+ "Writing config 'ssh_pwauth: %s'. SSH service '%s'"
+ " will not be restarted because it is not running or not"
+ " available.",
+ pw_auth,
+ service,
+ )
+ restart_ssh = False
+ elif e.exit_code == 3:
+ # Service is not running. Write ssh config.
+ LOG.debug(
+ "Writing config 'ssh_pwauth: %s'. SSH service '%s'"
+ " will not be restarted because it is stopped.",
+ pw_auth,
+ service,
+ )
+ restart_ssh = False
+ elif e.exit_code == 4:
+ # Service status is unknown
+ LOG.warning(
+ "Ignoring config 'ssh_pwauth: %s'."
+ " SSH service '%s' is not installed.",
+ pw_auth,
+ service,
+ )
+ return
+ else:
+ LOG.warning(
+ "Ignoring config 'ssh_pwauth: %s'."
+ " SSH service '%s' is not available. Error: %s.",
+ pw_auth,
+ service,
+ e,
+ )
+ return
+
cfg_name = "PasswordAuthentication"
+ if isinstance(pw_auth, str):
+ LOG.warning(
+ "DEPRECATION: The 'ssh_pwauth' config key should be set to "
+ "a boolean value. The string format is deprecated and will be "
+ "removed in a future version of cloud-init."
+ )
if util.is_true(pw_auth):
cfg_val = "yes"
elif util.is_false(pw_auth):
@@ -118,8 +155,11 @@ def handle_ssh_pwauth(pw_auth, distro):
LOG.debug("No need to restart SSH service, %s not updated.", cfg_name)
return
- distro.manage_service("restart", distro.get_option("ssh_svcname", "ssh"))
- LOG.debug("Restarted the SSH daemon.")
+ if restart_ssh:
+ distro.manage_service("restart", service)
+ LOG.debug("Restarted the SSH daemon.")
+ else:
+ LOG.debug("Not restarting SSH service: service is stopped.")
def handle(_name, cfg, cloud, log, args):
@@ -141,6 +181,11 @@ def handle(_name, cfg, cloud, log, args):
log.debug("Handling input for chpasswd as list.")
plist = util.get_cfg_option_list(chfg, "list", plist)
else:
+ log.warning(
+ "DEPRECATION: The chpasswd multiline string format is "
+ "deprecated and will be removed from a future version of "
+ "cloud-init. Use the list format instead."
+ )
log.debug("Handling input for chpasswd as multiline string.")
plist = util.get_cfg_option_str(chfg, "list", plist)
if plist:
@@ -227,7 +272,7 @@ def handle(_name, cfg, cloud, log, args):
handle_ssh_pwauth(cfg.get("ssh_pwauth"), cloud.distro)
if len(errors):
- log.debug("%s errors occured, re-raising the last one", len(errors))
+ log.debug("%s errors occurred, re-raising the last one", len(errors))
raise errors[-1]
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 9f343df0..41a6adf9 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -9,11 +9,7 @@ from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
from cloudinit.subp import prepend_base_command
@@ -54,10 +50,6 @@ meta: MetaSchema = {
best to create a snap seed directory and seed.yaml manifest in
**/var/lib/snapd/seed/** which snapd automatically installs on
startup.
-
- **Development only**: The ``squashfuse_in_container`` boolean can be
- set true to install squashfuse package when in a container to enable
- snap installs. Default is false.
"""
),
"distros": distros,
@@ -78,94 +70,44 @@ meta: MetaSchema = {
),
dedent(
"""\
- # LXC-based containers require squashfuse before snaps can be installed
- snap:
- commands:
- 00: apt-get install squashfuse -y
- 11: snap install emoj
-
- """
- ),
- dedent(
- """\
# Convenience: the snap command can be omitted when specifying commands
# as a list and 'snap' will automatically be prepended.
# The following commands are equivalent:
snap:
- commands:
- 00: ['install', 'vlc']
- 01: ['snap', 'install', 'vlc']
- 02: snap install vlc
- 03: 'snap install vlc'
+ commands:
+ 00: ['install', 'vlc']
+ 01: ['snap', 'install', 'vlc']
+ 02: snap install vlc
+ 03: 'snap install vlc'
"""
),
dedent(
"""\
# You can use a list of commands
snap:
- commands:
- - ['install', 'vlc']
- - ['snap', 'install', 'vlc']
- - snap install vlc
- - 'snap install vlc'
+ commands:
+ - ['install', 'vlc']
+ - ['snap', 'install', 'vlc']
+ - snap install vlc
+ - 'snap install vlc'
"""
),
dedent(
"""\
# You can use a list of assertions
snap:
- assertions:
- - signed_assertion_blob_here
- - |
- signed_assertion_blob_here
+ assertions:
+ - signed_assertion_blob_here
+ - |
+ signed_assertion_blob_here
"""
),
],
"frequency": PER_INSTANCE,
}
-schema = {
- "type": "object",
- "properties": {
- "snap": {
- "type": "object",
- "properties": {
- "assertions": {
- "type": ["object", "array"], # Array of strings or dict
- "items": {"type": "string"},
- "additionalItems": False, # Reject items non-string
- "minItems": 1,
- "minProperties": 1,
- "uniqueItems": True,
- "additionalProperties": {"type": "string"},
- },
- "commands": {
- "type": ["object", "array"], # Array of strings or dict
- "items": {
- "oneOf": [
- {"type": "array", "items": {"type": "string"}},
- {"type": "string"},
- ]
- },
- "additionalItems": False, # Reject non-string & non-list
- "minItems": 1,
- "minProperties": 1,
- "additionalProperties": {
- "oneOf": [
- {"type": "string"},
- {"type": "array", "items": {"type": "string"}},
- ],
- },
- },
- "squashfuse_in_container": {"type": "boolean"},
- },
- "additionalProperties": False, # Reject keys not in schema
- "minProperties": 1,
- }
- },
-}
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
+__doc__ = get_meta_doc(meta)
SNAP_CMD = "snap"
ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions"
@@ -240,23 +182,6 @@ def run_commands(commands):
raise RuntimeError(msg)
-# RELEASE_BLOCKER: Once LP: #1628289 is released on xenial, drop this function.
-def maybe_install_squashfuse(cloud):
- """Install squashfuse if we are in a container."""
- if not util.is_container():
- return
- try:
- cloud.distro.update_package_sources()
- except Exception:
- util.logexc(LOG, "Package update failed")
- raise
- try:
- cloud.distro.install_packages(["squashfuse"])
- except Exception:
- util.logexc(LOG, "Failed to install squashfuse")
- raise
-
-
def handle(name, cfg, cloud, log, args):
cfgin = cfg.get("snap", {})
if not cfgin:
@@ -265,9 +190,6 @@ def handle(name, cfg, cloud, log, args):
)
return
- validate_cloudconfig_schema(cfg, schema)
- if util.is_true(cfgin.get("squashfuse_in_container", False)):
- maybe_install_squashfuse(cloud)
add_assertions(cfgin.get("assertions", []))
run_commands(cfgin.get("commands", []))
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
index 3fa6c388..6820a816 100644
--- a/cloudinit/config/cc_spacewalk.py
+++ b/cloudinit/config/cc_spacewalk.py
@@ -1,10 +1,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Spacewalk: Install and configure spacewalk"""
-"""
-Spacewalk
----------
-**Summary:** install and configure spacewalk
+from textwrap import dedent
+
+from cloudinit import subp
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
This module installs spacewalk and applies basic configuration. If the
``spacewalk`` config key is present spacewalk will be installed. The server to
connect to after installation must be provided in the ``server`` in spacewalk
@@ -12,22 +15,29 @@ configuration. A proxy to connect through and a activation key may optionally
be specified.
For more information about spacewalk see: https://fedorahosted.org/spacewalk/
+"""
-**Internal name:** ``cc_spacewalk``
-
-**Module frequency:** per instance
-
-**Supported distros:** redhat, fedora
+meta: MetaSchema = {
+ "id": "cc_spacewalk",
+ "name": "Spacewalk",
+ "title": "Install and configure spacewalk",
+ "description": MODULE_DESCRIPTION,
+ "distros": ["rhel", "fedora"],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ spacewalk:
+ server: <url>
+ proxy: <proxy host>
+ activation_key: <key>
+ """
+ )
+ ],
+}
-**Config keys**::
+__doc__ = get_meta_doc(meta)
- spacewalk:
- server: <url>
- proxy: <proxy host>
- activation_key: <key>
-"""
-
-from cloudinit import subp
distros = ["redhat", "fedora"]
required_packages = ["rhn-setup"]
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 64486b9c..33c1fd0c 100755..100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -5,12 +5,23 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""SSH: Configure SSH and SSH keys"""
-"""
-SSH
----
-**Summary:** configure SSH and SSH keys (host and authorized)
+import glob
+import os
+import re
+import sys
+from logging import Logger
+from textwrap import dedent
+from typing import List, Optional, Sequence
+
+from cloudinit import ssh_util, subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS, ug_util
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
This module handles most configuration for SSH and both host and authorized SSH
keys.
@@ -28,12 +39,7 @@ should be specified as a list of public keys.
password authentication
Root login can be enabled/disabled using the ``disable_root`` config key. Root
-login options can be manually specified with ``disable_root_opts``. If
-``disable_root_opts`` is specified and contains the string ``$USER``,
-it will be replaced with the username of the default user. By default,
-root login is disabled, and root login opts are set to::
-
- no-port-forwarding,no-agent-forwarding,no-X11-forwarding
+login options can be manually specified with ``disable_root_opts``.
Supported public key types for the ``ssh_authorized_keys`` are:
@@ -75,32 +81,18 @@ Host Keys
^^^^^^^^^
Host keys are for authenticating a specific instance. Many images have default
-host SSH keys, which can be removed using ``ssh_deletekeys``. This prevents
-re-use of a private host key from an image on multiple machines. Since
-removing default host keys is usually the desired behavior this option is
-enabled by default.
-
-Host keys can be added using the ``ssh_keys`` configuration key. The argument
-to this config key should be a dictionary entries for the public and private
-keys of each desired key type. Entries in the ``ssh_keys`` config dict should
-have keys in the format ``<key type>_private``, ``<key type>_public``, and,
-optionally, ``<key type>_certificate``, e.g. ``rsa_private: <key>``,
-``rsa_public: <key>``, and ``rsa_certificate: <key>``. See below for supported
-key types. Not all key types have to be specified, ones left unspecified will
-not be used. If this config option is used, then no keys will be generated.
+host SSH keys, which can be removed using ``ssh_deletekeys``.
+
+Host keys can be added using the ``ssh_keys`` configuration key.
When host keys are generated the output of the ssh-keygen command(s) can be
displayed on the console using the ``ssh_quiet_keygen`` configuration key.
-This settings defaults to False which displays the keygen output.
.. note::
when specifying private host keys in cloud-config, care should be taken to
ensure that the communication between the data source and the instance is
secure
-.. note::
- to specify multiline private host keys and certificates, use yaml
- multiline syntax
If no host keys are specified using ``ssh_keys``, then keys will be generated
using ``ssh-keygen``. By default one public/private pair of each supported
@@ -113,59 +105,74 @@ system (i.e. if ``ssh_deletekeys`` was false), no key will be generated.
Supported host key types for the ``ssh_keys`` and the ``ssh_genkeytypes``
config flags are:
- - rsa
- dsa
- ecdsa
- ed25519
+ - rsa
+
+Unsupported host key types for the ``ssh_keys`` and the ``ssh_genkeytypes``
+config flags are:
-**Internal name:** ``cc_ssh``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- ssh_deletekeys: <true/false>
- ssh_keys:
- rsa_private: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco
- ...
- -----END RSA PRIVATE KEY-----
- rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...
- rsa_certificate: |
- ssh-rsa-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQt ...
- dsa_private: |
- -----BEGIN DSA PRIVATE KEY-----
- MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco
- ...
- -----END DSA PRIVATE KEY-----
- dsa_public: ssh-dsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...
- dsa_certificate: |
- ssh-dsa-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQt ...
-
- ssh_genkeytypes: <key type>
- disable_root: <true/false>
- disable_root_opts: <disable root options string>
- ssh_authorized_keys:
- - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ...
- - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ...
- allow_public_ssh_keys: <true/false>
- ssh_publish_hostkeys:
- enabled: <true/false> (Defaults to true)
- blacklist: <list of key types> (Defaults to [dsa])
- ssh_quiet_keygen: <true/false>
+ - ecdsa-sk
+ - ed25519-sk
"""
-import glob
-import os
-import sys
+# Note: We do not support *-sk key types because:
+# 1) In the autogeneration case user interaction with the device is needed
+# which does not fit with a cloud-context.
+# 2) This type of keys are user-based, not hostkeys.
+
+
+meta: MetaSchema = {
+ "id": "cc_ssh",
+ "name": "SSH",
+ "title": "Configure SSH and SSH keys",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ ssh_keys:
+ rsa_private: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco
+ ...
+ -----END RSA PRIVATE KEY-----
+ rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...
+ rsa_certificate: |
+ ssh-rsa-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQt ...
+ dsa_private: |
+ -----BEGIN DSA PRIVATE KEY-----
+ MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco
+ ...
+ -----END DSA PRIVATE KEY-----
+ dsa_public: ssh-dsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...
+ dsa_certificate: |
+ ssh-dsa-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQt ...
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ...
+ - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ...
+ ssh_deletekeys: true
+ ssh_genkeytypes: [rsa, dsa, ecdsa, ed25519]
+ disable_root: true
+ disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding
+ allow_public_ssh_keys: true
+ ssh_quiet_keygen: true
+ ssh_publish_hostkeys:
+ enabled: true
+ blacklist: [dsa]
+ """ # noqa: E501
+ )
+ ],
+}
-from cloudinit import ssh_util, subp, util
-from cloudinit.distros import ug_util
+__doc__ = get_meta_doc(meta)
GENERATE_KEY_NAMES = ["rsa", "dsa", "ecdsa", "ed25519"]
+pattern_unsupported_config_keys = re.compile(
+ "^(ecdsa-sk|ed25519-sk)_(private|public|certificate)$"
+)
KEY_FILE_TPL = "/etc/ssh/ssh_host_%s_key"
PUBLISH_HOST_KEYS = True
# Don't publish the dsa hostkey by default since OpenSSH recommends not using
@@ -175,19 +182,19 @@ HOST_KEY_PUBLISH_BLACKLIST = ["dsa"]
CONFIG_KEY_TO_FILE = {}
PRIV_TO_PUB = {}
for k in GENERATE_KEY_NAMES:
- CONFIG_KEY_TO_FILE.update({"%s_private" % k: (KEY_FILE_TPL % k, 0o600)})
- CONFIG_KEY_TO_FILE.update(
- {"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)}
- )
CONFIG_KEY_TO_FILE.update(
- {"%s_certificate" % k: (KEY_FILE_TPL % k + "-cert.pub", 0o600)}
+ {
+ f"{k}_private": (KEY_FILE_TPL % k, 0o600),
+ f"{k}_public": (f"{KEY_FILE_TPL % k}.pub", 0o600),
+ f"{k}_certificate": (f"{KEY_FILE_TPL % k}-cert.pub", 0o600),
+ }
)
- PRIV_TO_PUB["%s_private" % k] = "%s_public" % k
+ PRIV_TO_PUB[f"{k}_private"] = f"{k}_public"
KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
-def handle(_name, cfg, cloud, log, _args):
+def handle(_name, cfg, cloud: Cloud, log: Logger, _args):
# remove the static keys from the pristine image
if cfg.get("ssh_deletekeys", True):
@@ -201,8 +208,12 @@ def handle(_name, cfg, cloud, log, _args):
if "ssh_keys" in cfg:
# if there are keys and/or certificates in cloud-config, use them
for (key, val) in cfg["ssh_keys"].items():
- # skip entry if unrecognized
if key not in CONFIG_KEY_TO_FILE:
+ if pattern_unsupported_config_keys.match(key):
+ reason = "unsupported"
+ else:
+ reason = "unrecognized"
+ log.warning("Skipping %s ssh_keys" ' entry: "%s"', reason, key)
continue
tgt_fn = CONFIG_KEY_TO_FILE[key][0]
tgt_perms = CONFIG_KEY_TO_FILE[key][1]
@@ -307,7 +318,7 @@ def handle(_name, cfg, cloud, log, _args):
cfg, "disable_root_opts", ssh_util.DISABLE_USER_OPTS
)
- keys = []
+ keys: List[str] = []
if util.get_cfg_option_bool(cfg, "allow_public_ssh_keys", True):
keys = cloud.get_public_ssh_keys() or []
else:
@@ -342,7 +353,7 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):
ssh_util.setup_user_keys(keys, "root", options=key_prefix)
-def get_public_host_keys(blacklist=None):
+def get_public_host_keys(blacklist: Optional[Sequence[str]] = None):
"""Read host keys from /etc/ssh/*.pub files and return them as a list.
@param blacklist: List of key types to ignore. e.g. ['dsa', 'rsa']
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 020c3469..db5c1454 100755..100644
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -3,34 +3,37 @@
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""SSH AuthKey Fingerprints: Log fingerprints of user SSH keys"""
-"""
-SSH Authkey Fingerprints
-------------------------
-**Summary:** log fingerprints of user SSH keys
+import base64
+import hashlib
+
+from cloudinit import ssh_util, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS, ug_util
+from cloudinit.settings import PER_INSTANCE
+from cloudinit.simpletable import SimpleTable
+MODULE_DESCRIPTION = """\
Write fingerprints of authorized keys for each user to log. This is enabled by
default, but can be disabled using ``no_ssh_fingerprints``. The hash type for
the keys can be specified, but defaults to ``sha256``.
-
-**Internal name:** ``cc_ssh_authkey_fingerprints``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- no_ssh_fingerprints: <true/false>
- authkey_hash: <hash type>
"""
-import base64
-import hashlib
+meta: MetaSchema = {
+ "id": "cc_ssh_authkey_fingerprints",
+ "name": "SSH AuthKey Fingerprints",
+ "title": "Log fingerprints of user SSH keys",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ "no_ssh_fingerprints: true",
+ "authkey_hash: sha512",
+ ],
+}
-from cloudinit import ssh_util, util
-from cloudinit.distros import ug_util
-from cloudinit.simpletable import SimpleTable
+__doc__ = get_meta_doc(meta)
def _split_hash(bin_hash):
@@ -119,6 +122,14 @@ def handle(name, cfg, cloud, log, _args):
hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "sha256")
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
for (user_name, _cfg) in users.items():
+ if _cfg.get("no_create_home") or _cfg.get("system"):
+ log.debug(
+ "Skipping printing of ssh fingerprints for user '%s' because "
+ "no home directory is created",
+ user_name,
+ )
+ continue
+
(key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
_pprint_key_entries(user_name, key_fn, key_entries, hash_meth)
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index a9575c59..6a15895d 100755..100644
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -5,42 +5,64 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
-
-"""
-SSH Import Id
--------------
-**Summary:** import SSH id
-
-This module imports SSH keys from either a public keyserver, usually launchpad
-or github using ``ssh-import-id``. Keys are referenced by the username they are
-associated with on the keyserver. The keyserver can be specified by prepending
-either ``lp:`` for launchpad or ``gh:`` for github to the username.
-
-**Internal name:** ``cc_ssh_import_id``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu, debian
-
-**Config keys**::
-
- ssh_import_id:
- - user
- - gh:user
- - lp:user
-"""
+"""SSH Import ID: Import SSH id"""
import pwd
+from textwrap import dedent
from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ug_util
+from cloudinit.settings import PER_INSTANCE
# https://launchpad.net/ssh-import-id
distros = ["ubuntu", "debian"]
+SSH_IMPORT_ID_BINARY = "ssh-import-id"
+MODULE_DESCRIPTION = """\
+This module imports SSH keys from either a public keyserver, usually launchpad
+or github using ``ssh-import-id``. Keys are referenced by the username they are
+associated with on the keyserver. The keyserver can be specified by prepending
+either ``lp:`` for launchpad or ``gh:`` for github to the username.
+"""
+
+meta: MetaSchema = {
+ "id": "cc_ssh_import_id",
+ "name": "SSH Import ID",
+ "title": "Import SSH id",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ ssh_import_id:
+ - user
+ - gh:user
+ - lp:user
+ """
+ )
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
+
def handle(_name, cfg, cloud, log, args):
+ if not is_key_in_nested_dict(cfg, "ssh_import_id"):
+ log.debug(
+ "Skipping module named ssh-import-id, no 'ssh_import_id'"
+ " directives found."
+ )
+ return
+ elif not subp.which(SSH_IMPORT_ID_BINARY):
+ log.warn(
+ "ssh-import-id is not installed, but module ssh_import_id is "
+ "configured. Skipping module."
+ )
+ return
+
# import for "user: XXXXX"
if len(args) != 0:
user = args[0]
@@ -100,7 +122,38 @@ def import_ssh_ids(ids, user, log):
except KeyError as exc:
raise exc
- cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
+    # TODO: We have a use case that involves setting a proxy value earlier
+ # in boot and the user wants this env used when using ssh-import-id.
+ # E.g.,:
+ # bootcmd:
+ # - mkdir -p /etc/systemd/system/cloud-config.service.d
+ # - mkdir -p /etc/systemd/system/cloud-final.service.d
+ # write_files:
+ # - content: |
+ # http_proxy=http://192.168.1.2:3128/
+ # https_proxy=http://192.168.1.2:3128/
+ # path: /etc/cloud/env
+ # - content: |
+ # [Service]
+ # EnvironmentFile=/etc/cloud/env
+ # PassEnvironment=https_proxy http_proxy
+ # path: /etc/systemd/system/cloud-config.service.d/override.conf
+ # - content: |
+ # [Service]
+ # EnvironmentFile=/etc/cloud/env
+ # PassEnvironment=https_proxy http_proxy
+ # path: /etc/systemd/system/cloud-final.service.d/override.conf
+ #
+ # I'm including the `--preserve-env` here as a one-off, but we should
+ # have a better way of setting env earlier in boot and using it later.
+ # Perhaps a 'set_env' module?
+ cmd = [
+ "sudo",
+ "--preserve-env=https_proxy",
+ "-Hu",
+ user,
+ SSH_IMPORT_ID_BINARY,
+ ] + ids
log.debug("Importing SSH ids for user %s.", user)
try:
@@ -110,4 +163,23 @@ def import_ssh_ids(ids, user, log):
raise exc
-# vi: ts=4 expandtab
+def is_key_in_nested_dict(config: dict, search_key: str) -> bool:
+ """Search for key nested in config.
+
+ Note: A dict embedded in a list of lists will not be found walked - but in
+ this case we don't need it.
+ """
+ for config_key in config.keys():
+ if search_key == config_key:
+ return True
+ if isinstance(config[config_key], dict):
+ if is_key_in_nested_dict(config[config_key], search_key):
+ return True
+ if isinstance(config[config_key], list):
+ # this code could probably be generalized to walking the whole
+ # config by iterating lists in search of dictionaries
+ for item in config[config_key]:
+ if isinstance(item, dict):
+ if is_key_in_nested_dict(item, search_key):
+ return True
+ return False
diff --git a/cloudinit/config/cc_timezone.py b/cloudinit/config/cc_timezone.py
index 24e6099e..47da2d06 100644
--- a/cloudinit/config/cc_timezone.py
+++ b/cloudinit/config/cc_timezone.py
@@ -5,31 +5,30 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
-
-"""
-Timezone
---------
-**Summary:** set system timezone
-
-Set the system timezone. If any args are passed to the module then the first
-will be used for the timezone. Otherwise, the module will attempt to retrieve
-the timezone from cloud config.
-
-**Internal name:** ``cc_timezone``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- timezone: <timezone>
-"""
+"""Timezone: Set the system timezone"""
from cloudinit import util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
-frequency = PER_INSTANCE
+MODULE_DESCRIPTION = """\
+Sets the system timezone based on the value provided.
+"""
+
+meta: MetaSchema = {
+ "id": "cc_timezone",
+ "name": "Timezone",
+ "title": "Set the system timezone",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ "timezone: US/Eastern",
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, args):
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index e469bb22..57763c31 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -6,11 +6,7 @@ from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
UA_URL = "https://ubuntu.com/advantage"
@@ -32,7 +28,7 @@ meta: MetaSchema = {
Note that when enabling FIPS or FIPS updates you will need to schedule
a reboot to ensure the machine is running the FIPS-compliant kernel.
- See :ref:`Power State Change` for information on how to configure
+ See `Power State Change`_ for information on how to configure
cloud-init to perform this reboot.
"""
),
@@ -53,7 +49,7 @@ meta: MetaSchema = {
# only fips and esm services. Services will only be enabled if
# the environment supports said service. Otherwise warnings will
# be logged for incompatible services specified.
- ubuntu-advantage:
+ ubuntu_advantage:
token: <ua_contract_token>
enable:
- fips
@@ -67,7 +63,7 @@ meta: MetaSchema = {
# completed.
power_state:
mode: reboot
- ubuntu-advantage:
+ ubuntu_advantage:
token: <ua_contract_token>
enable:
- fips
@@ -77,29 +73,7 @@ meta: MetaSchema = {
"frequency": PER_INSTANCE,
}
-schema = {
- "type": "object",
- "properties": {
- "ubuntu_advantage": {
- "type": "object",
- "properties": {
- "enable": {
- "type": "array",
- "items": {"type": "string"},
- },
- "token": {
- "type": "string",
- "description": "A contract token obtained from %s."
- % UA_URL,
- },
- },
- "required": ["token"],
- "additionalProperties": False,
- }
- },
-}
-
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
+__doc__ = get_meta_doc(meta)
LOG = logging.getLogger(__name__)
@@ -194,7 +168,6 @@ def handle(name, cfg, cloud, log, args):
name,
)
return
- validate_cloudconfig_schema(cfg, schema)
if "commands" in ua_section:
msg = (
'Deprecated configuration "ubuntu-advantage: commands" provided.'
diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
index 44a3bdb4..15f621a7 100644
--- a/cloudinit/config/cc_ubuntu_drivers.py
+++ b/cloudinit/config/cc_ubuntu_drivers.py
@@ -7,17 +7,13 @@ from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, temp_utils, type_utils, util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
-frequency = PER_INSTANCE
distros = ["ubuntu"]
+
meta: MetaSchema = {
"id": "cc_ubuntu_drivers",
"name": "Ubuntu Drivers",
@@ -37,47 +33,15 @@ meta: MetaSchema = {
"""
)
],
- "frequency": frequency,
+ "frequency": PER_INSTANCE,
}
-schema = {
- "type": "object",
- "properties": {
- "drivers": {
- "type": "object",
- "additionalProperties": False,
- "properties": {
- "nvidia": {
- "type": "object",
- "additionalProperties": False,
- "required": ["license-accepted"],
- "properties": {
- "license-accepted": {
- "type": "boolean",
- "description": (
- "Do you accept the NVIDIA driver license?"
- ),
- },
- "version": {
- "type": "string",
- "description": (
- "The version of the driver to install (e.g."
- ' "390", "410"). Defaults to the latest'
- " version."
- ),
- },
- },
- },
- },
- },
- },
-}
+__doc__ = get_meta_doc(meta)
+
OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = (
"ubuntu-drivers: error: argument <command>: invalid choice: 'install'"
)
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
-
# Use a debconf template to configure a global debconf variable
# (linux/nvidia/latelink) setting this to "true" allows the
@@ -180,5 +144,4 @@ def handle(name, cfg, cloud, log, _args):
log.debug("Skipping module named %s, no 'drivers' key in config", name)
return
- validate_cloudconfig_schema(cfg, schema)
install_drivers(cfg["drivers"], cloud.distro.install_packages)
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index f0aa9b0f..5334f453 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -6,18 +6,22 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Update Etc Hosts
-----------------
-**Summary:** update the hosts file (usually ``/etc/hosts``)
+"""Update Etc Hosts: Update the hosts file (usually ``/etc/hosts``)"""
+
+from textwrap import dedent
+
+from cloudinit import templater, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_ALWAYS
+MODULE_DESCRIPTION = """\
This module will update the contents of the local hosts database (hosts file;
usually ``/etc/hosts``) based on the hostname/fqdn specified in config.
Management of the hosts file is controlled using ``manage_etc_hosts``. If this
is set to false, cloud-init will not manage the hosts file at all. This is the
default behavior.
-If set to ``true`` or ``template``, cloud-init will generate the hosts file
+If set to ``true``, cloud-init will generate the hosts file
using the template located in ``/etc/cloud/templates/hosts.tmpl``. In the
``/etc/cloud/templates/hosts.tmpl`` template, the strings ``$hostname`` and
``$fqdn`` will be replaced with the hostname and fqdn respectively.
@@ -36,24 +40,57 @@ ping ``127.0.0.1`` or ``127.0.1.1`` or other ip).
.. note::
for instructions on specifying hostname and fqdn, see documentation for
``cc_set_hostname``
-
-**Internal name:** ``cc_update_etc_hosts``
-
-**Module frequency:** always
-
-**Supported distros:** all
-
-**Config keys**::
-
- manage_etc_hosts: <true/"template"/false/"localhost">
- fqdn: <fqdn>
- hostname: <fqdn/hostname>
"""
-from cloudinit import templater, util
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
+distros = ["all"]
+
+meta: MetaSchema = {
+ "id": "cc_update_etc_hosts",
+ "name": "Update Etc Hosts",
+ "title": "Update the hosts file (usually ``/etc/hosts``)",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ # Do not update or manage /etc/hosts at all. This is the default behavior.
+ #
+ # Whatever is present at instance boot time will be present after boot.
+ # User changes will not be overwritten.
+ manage_etc_hosts: false
+ """
+ ),
+ dedent(
+ """\
+ # Manage /etc/hosts with cloud-init.
+ # On every boot, /etc/hosts will be re-written from
+ # ``/etc/cloud/templates/hosts.tmpl``.
+ #
+ # The strings '$hostname' and '$fqdn' are replaced in the template
+      # with the appropriate values either from the cloud-config ``fqdn`` or
+      # ``hostname`` if provided. When absent, the cloud metadata will be
+      # checked for ``local-hostname`` which can be split into <hostname>.<fqdn>.
+ #
+ # To make modifications persistent across a reboot, you must modify
+ # ``/etc/cloud/templates/hosts.tmpl``.
+ manage_etc_hosts: true
+ """
+ ),
+ dedent(
+ """\
+ # Update /etc/hosts every boot providing a "localhost" 127.0.1.1 entry
+ # with the latest hostname and fqdn as provided by either IMDS or
+ # cloud-config.
+ # All other entries will be left as is.
+ # 'ping `hostname`' will ping 127.0.1.1
+ manage_etc_hosts: localhost
+ """
+ ),
+ ],
+ "frequency": PER_ALWAYS,
+}
+
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, _args):
@@ -62,6 +99,11 @@ def handle(name, cfg, cloud, log, _args):
hosts_fn = cloud.distro.hosts_fn
if util.translate_bool(manage_hosts, addons=["template"]):
+ if manage_hosts == "template":
+ log.warning(
+ "DEPRECATED: please use manage_etc_hosts: true instead of"
+ " 'template'"
+ )
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
if not hostname:
log.warning(
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index 09f6f6da..1042abf3 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -6,38 +6,76 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Update Hostname
----------------
-**Summary:** update hostname and fqdn
+"""Update Hostname: Update hostname and fqdn"""
+
+import os
+from textwrap import dedent
+
+from cloudinit import util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_ALWAYS
+MODULE_DESCRIPTION = """\
This module will update the system hostname and fqdn. If ``preserve_hostname``
-is set, then the hostname will not be altered.
+is set ``true``, then the hostname will not be altered.
.. note::
for instructions on specifying hostname and fqdn, see documentation for
``cc_set_hostname``
-
-**Internal name:** ``cc_update_hostname``
-
-**Module frequency:** always
-
-**Supported distros:** all
-
-**Config keys**::
-
- preserve_hostname: <true/false>
- prefer_fqdn_over_hostname: <true/false>
- fqdn: <fqdn>
- hostname: <fqdn/hostname>
"""
-import os
-
-from cloudinit import util
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
+distros = ["all"]
+
+meta: MetaSchema = {
+ "id": "cc_update_hostname",
+ "name": "Update Hostname",
+ "title": "Update hostname and fqdn",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ # By default: when ``preserve_hostname`` is not specified cloud-init
+ # updates ``/etc/hostname`` per-boot based on the cloud provided
+ # ``local-hostname`` setting. If you manually change ``/etc/hostname``
+ # after boot cloud-init will no longer modify it.
+ #
+ # This default cloud-init behavior is equivalent to this cloud-config:
+ preserve_hostname: false
+ """
+ ),
+ dedent(
+ """\
+ # Prevent cloud-init from updating the system hostname.
+ preserve_hostname: true
+ """
+ ),
+ dedent(
+ """\
+ # Prevent cloud-init from updating ``/etc/hostname``
+ preserve_hostname: true
+ """
+ ),
+ dedent(
+ """\
+ # Set hostname to "external.fqdn.me" instead of "myhost"
+ fqdn: external.fqdn.me
+ hostname: myhost
+ prefer_fqdn_over_hostname: true
+ """
+ ),
+ dedent(
+ """\
+ # Set hostname to "external" instead of "external.fqdn.me" when
+ # cloud metadata provides the ``local-hostname``: "external.fqdn.me".
+ prefer_fqdn_over_hostname: false
+ """
+ ),
+ ],
+ "frequency": PER_ALWAYS,
+}
+
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, _args):
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index ef77a799..96e63242 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -4,72 +4,48 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Users and Groups
-----------------
-**Summary:** configure users and groups
+"Users and Groups: Configure users and groups"
+
+from textwrap import dedent
+
+from cloudinit import log as logging
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+
+# Ensure this is aliased to a name not 'distros'
+# since the module attribute 'distros'
+# is a list of distros that are supported, not a sub-module
+from cloudinit.distros import ug_util
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
This module configures users and groups. For more detailed information on user
-options, see the ``Including users and groups`` config example.
-
-Groups to add to the system can be specified as a list under the ``groups``
-key. Each entry in the list should either contain a the group name as a string,
-or a dictionary with the group name as the key and a list of users who should
-be members of the group as the value. **Note**: Groups are added before users,
-so any users in a group list must already exist on the system.
-
-The ``users`` config key takes a list of users to configure. The first entry in
-this list is used as the default user for the system. To preserve the standard
-default user for the distro, the string ``default`` may be used as the first
-entry of the ``users`` list. Each entry in the ``users`` list, other than a
-``default`` entry, should be a dictionary of options for the user. Supported
-config keys for an entry in ``users`` are as follows:
-
- - ``name``: The user's login name
- - ``expiredate``: Optional. Date on which the user's account will be
- disabled. Default: none
- - ``gecos``: Optional. Comment about the user, usually a comma-separated
- string of real name and contact information. Default: none
- - ``groups``: Optional. Additional groups to add the user to. Default: none
- - ``homedir``: Optional. Home dir for user. Default is ``/home/<username>``
- - ``inactive``: Optional. Number of days after a password expires until
- the account is permanently disabled. Default: none
- - ``lock_passwd``: Optional. Disable password login. Default: true
- - ``no_create_home``: Optional. Do not create home directory. Default:
- false
- - ``no_log_init``: Optional. Do not initialize lastlog and faillog for
- user. Default: false
- - ``no_user_group``: Optional. Do not create group named after user.
- Default: false
- - ``passwd``: Hash of user password
- - ``primary_group``: Optional. Primary group for user. Default to new group
- named after user.
- - ``selinux_user``: Optional. SELinux user for user's login. Default to
- default SELinux user.
- - ``shell``: Optional. The user's login shell. The default is to set no
- shell, which results in a system-specific default being used.
- - ``snapuser``: Optional. Specify an email address to create the user as
- a Snappy user through ``snap create-user``. If an Ubuntu SSO account is
- associated with the address, username and SSH keys will be requested from
- there. Default: none
- - ``ssh_authorized_keys``: Optional. List of SSH keys to add to user's
- authkeys file. Default: none. This key can not be combined with
- ``ssh_redirect_user``.
- - ``ssh_import_id``: Optional. SSH id to import for user. Default: none.
- This key can not be combined with ``ssh_redirect_user``.
- - ``ssh_redirect_user``: Optional. Boolean set to true to disable SSH
- logins for this user. When specified, all cloud meta-data public SSH
- keys will be set up in a disabled state for this username. Any SSH login
- as this username will timeout and prompt with a message to login instead
- as the configured <default_username> for this instance. Default: false.
- This key can not be combined with ``ssh_import_id`` or
- ``ssh_authorized_keys``.
- - ``sudo``: Optional. Sudo rule to use, list of sudo rules to use or False.
- Default: none. An absence of sudo key, or a value of none or false
- will result in no sudo rules being written for the user.
- - ``system``: Optional. Create user as system user with no home directory.
- Default: false
- - ``uid``: Optional. The user's ID. Default: The next available value.
+options, see the :ref:`Including users and groups<yaml_examples>` config
+example.
+
+Groups to add to the system can be specified under the ``groups`` key as
+a string of comma-separated groups to create, or a list. Each item in
+the list should either contain a string of a single group to create,
+or a dictionary with the group name as the key and string of a single user as
+a member of that group or a list of users who should be members of the group.
+
+.. note::
+ Groups are added before users, so any users in a group list must
+ already exist on the system.
+
+Users to add can be specified as a string or list under the ``users`` key.
+Each entry in the list should either be a string or a dictionary. If a string
+is specified, that string can be comma-separated usernames to create or the
+reserved string ``default`` which represents the primary admin user used to
+access the system. The ``default`` user varies per distribution and is
+generally configured in ``/etc/cloud/cloud.cfg`` by the ``default_user`` key.
+
+Each ``users`` dictionary item must contain either a ``name`` or ``snapuser``
+key, otherwise it will be ignored. Omission of ``default`` as the first item
+in the ``users`` list skips creation of the default user. If no ``users`` key is
+provided the default behavior is to create the default user via this config::
+
+ users:
+ - default
.. note::
Specifying a hash of a user's password with ``passwd`` is a security risk
@@ -85,68 +61,120 @@ config keys for an entry in ``users`` are as follows:
to already-existing users: ``plain_text_passwd``, ``hashed_passwd``,
``lock_passwd``, ``sudo``, ``ssh_authorized_keys``, ``ssh_redirect_user``.
-**Internal name:** ``cc_users_groups``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- groups:
- - <group>: [<user>, <user>]
- - <group>
+The ``user`` key can be used to override the ``default_user`` configuration
+defined in ``/etc/cloud/cloud.cfg``. The ``user`` value should be a dictionary
+which supports the same config keys as the ``users`` dictionary items.
+"""
- users:
+meta: MetaSchema = {
+ "id": "cc_users_groups",
+ "name": "Users and Groups",
+ "title": "Configure users and groups",
+ "description": MODULE_DESCRIPTION,
+ "distros": ["all"],
+ "examples": [
+ dedent(
+ """\
+ # Add the ``default_user`` from /etc/cloud/cloud.cfg.
+ # This is also the default behavior of cloud-init when no `users` key
+ # is provided.
+ users:
+ - default
+ """
+ ),
+ dedent(
+ """\
+ # Add the 'admingroup' with members 'root' and 'sys' and an empty
+ # group cloud-users.
+ groups:
+ - admingroup: [root,sys]
+ - cloud-users
+ """
+ ),
+ dedent(
+ """\
+ # Skip creation of the <default> user and only create newsuper.
+ # Password-based login is rejected, but the github user TheRealFalcon
+ # and the launchpad user falcojr can SSH as newsuper. The default
+ # shell for newsuper is bash instead of system default.
+ users:
+ - name: newsuper
+ gecos: Big Stuff
+ groups: users, admin
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ lock_passwd: true
+ ssh_import_id:
+ - lp:falcojr
+ - gh:TheRealFalcon
+ """
+ ),
+ dedent(
+ """\
+ # On a system with SELinux enabled, add youruser and set the
+ # SELinux user to 'staff_u'. When omitted on SELinux, the system will
+ # select the configured default SELinux user.
+ users:
+ - default
+ - name: youruser
+ selinux_user: staff_u
+ """
+ ),
+ dedent(
+ """\
+ # To redirect a legacy username to the <default> user for a
+ # distribution, ssh_redirect_user will accept an SSH connection and
+ # emit a message telling the client to ssh as the <default> user.
+ # SSH clients will get the message:
+ users:
- default
- # User explicitly omitted from sudo permission; also default behavior.
- - name: <some_restricted_user>
+ - name: nosshlogins
+ ssh_redirect_user: true
+ """
+ ),
+ dedent(
+ """\
+ # Override any ``default_user`` config in /etc/cloud/cloud.cfg with
+ # supplemental config options.
+            # This config will make the default user mynewdefault and change
+ # the user to not have sudo rights.
+ ssh_import_id: [chad.smith]
+ user:
+ name: mynewdefault
sudo: false
- - name: <username>
- expiredate: '<date>'
- gecos: <comment>
- groups: <additional groups>
- homedir: <home directory>
- inactive: '<number of days>'
- lock_passwd: <true/false>
- no_create_home: <true/false>
- no_log_init: <true/false>
- no_user_group: <true/false>
- passwd: <password>
- primary_group: <primary group>
- selinux_user: <selinux username>
- shell: <shell path>
- snapuser: <email>
- ssh_redirect_user: <true/false>
- ssh_authorized_keys:
- - <key>
- - <key>
- ssh_import_id: <id>
- sudo: <sudo config>
- system: <true/false>
- uid: <user id>
-"""
-
-from cloudinit import log as logging
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit.distros import ug_util
-from cloudinit.settings import PER_INSTANCE
+__doc__ = get_meta_doc(meta)
LOG = logging.getLogger(__name__)
-frequency = PER_INSTANCE
+# NO_HOME and NEED_HOME are mutually exclusive options
+NO_HOME = ("no_create_home", "system")
+NEED_HOME = ("ssh_authorized_keys", "ssh_import_id", "ssh_redirect_user")
def handle(name, cfg, cloud, _log, _args):
(users, groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
(default_user, _user_config) = ug_util.extract_default(users)
cloud_keys = cloud.get_public_ssh_keys() or []
+
for (name, members) in groups.items():
cloud.distro.create_group(name, members)
+
for (user, config) in users.items():
+
+ no_home = [key for key in NO_HOME if config.get(key)]
+ need_home = [key for key in NEED_HOME if config.get(key)]
+ if no_home and need_home:
+ raise ValueError(
+ f"Not creating user {user}. Key(s) {', '.join(need_home)}"
+ f" cannot be provided with {', '.join(no_home)}"
+ )
+
ssh_redirect_user = config.pop("ssh_redirect_user", False)
if ssh_redirect_user:
if "ssh_authorized_keys" in config or "ssh_import_id" in config:
@@ -173,6 +201,7 @@ def handle(name, cfg, cloud, _log, _args):
else:
config["ssh_redirect_user"] = default_user
config["cloud_public_ssh_keys"] = cloud_keys
+
cloud.distro.create_user(user, **config)
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 37dae392..7cc7f854 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -12,41 +12,16 @@ from textwrap import dedent
from cloudinit import log as logging
from cloudinit import util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
-frequency = PER_INSTANCE
-
DEFAULT_OWNER = "root:root"
DEFAULT_PERMS = 0o644
DEFAULT_DEFER = False
-UNKNOWN_ENC = "text/plain"
+TEXT_PLAIN_ENC = "text/plain"
LOG = logging.getLogger(__name__)
-distros = ["all"]
-
-# The schema definition for each cloud-config module is a strict contract for
-# describing supported configuration parameters for each cloud-config section.
-# It allows cloud-config to validate and alert users to invalid or ignored
-# configuration options before actually attempting to deploy with said
-# configuration.
-
-supported_encoding_types = [
- "gz",
- "gzip",
- "gz+base64",
- "gzip+base64",
- "gz+b64",
- "gzip+b64",
- "b64",
- "base64",
-]
-
meta: MetaSchema = {
"id": "cc_write_files",
"name": "Write Files",
@@ -70,7 +45,7 @@ meta: MetaSchema = {
the early boot process. Use /run/somedir instead to avoid race
LP:1707222."""
),
- "distros": distros,
+ "distros": ["all"],
"examples": [
dedent(
"""\
@@ -132,113 +107,13 @@ meta: MetaSchema = {
"""
),
],
- "frequency": frequency,
-}
-
-schema = {
- "type": "object",
- "properties": {
- "write_files": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "path": {
- "type": "string",
- "description": dedent(
- """\
- Path of the file to which ``content`` is decoded
- and written
- """
- ),
- },
- "content": {
- "type": "string",
- "default": "",
- "description": dedent(
- """\
- Optional content to write to the provided ``path``.
- When content is present and encoding is not '%s',
- decode the content prior to writing. Default:
- **''**
- """
- % UNKNOWN_ENC
- ),
- },
- "owner": {
- "type": "string",
- "default": DEFAULT_OWNER,
- "description": dedent(
- """\
- Optional owner:group to chown on the file. Default:
- **{owner}**
- """.format(
- owner=DEFAULT_OWNER
- )
- ),
- },
- "permissions": {
- "type": "string",
- "default": oct(DEFAULT_PERMS).replace("o", ""),
- "description": dedent(
- """\
- Optional file permissions to set on ``path``
- represented as an octal string '0###'. Default:
- **'{perms}'**
- """.format(
- perms=oct(DEFAULT_PERMS).replace("o", "")
- )
- ),
- },
- "encoding": {
- "type": "string",
- "default": UNKNOWN_ENC,
- "enum": supported_encoding_types,
- "description": dedent(
- """\
- Optional encoding type of the content. Default is
- **text/plain** and no content decoding is
- performed. Supported encoding types are:
- %s."""
- % ", ".join(supported_encoding_types)
- ),
- },
- "append": {
- "type": "boolean",
- "default": False,
- "description": dedent(
- """\
- Whether to append ``content`` to existing file if
- ``path`` exists. Default: **false**.
- """
- ),
- },
- "defer": {
- "type": "boolean",
- "default": DEFAULT_DEFER,
- "description": dedent(
- """\
- Defer writing the file until 'final' stage, after
- users were created, and packages were installed.
- Default: **{defer}**.
- """.format(
- defer=DEFAULT_DEFER
- )
- ),
- },
- },
- "required": ["path"],
- "additionalProperties": False,
- },
- }
- },
+ "frequency": PER_INSTANCE,
}
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, _cloud, log, _args):
- validate_cloudconfig_schema(cfg, schema)
file_list = cfg.get("write_files", [])
filtered_files = [
f
@@ -266,14 +141,18 @@ def canonicalize_extraction(encoding_type):
# Yaml already encodes binary data as base64 if it is given to the
# yaml file as binary, so those will be automatically decoded for you.
# But the above b64 is just for people that are more 'comfortable'
- # specifing it manually (which might be a possiblity)
+ # specifing it manually (which might be a possibility)
if encoding_type in ["b64", "base64"]:
return ["application/base64"]
+ if encoding_type == TEXT_PLAIN_ENC:
+ return [TEXT_PLAIN_ENC]
if encoding_type:
LOG.warning(
- "Unknown encoding type %s, assuming %s", encoding_type, UNKNOWN_ENC
+ "Unknown encoding type %s, assuming %s",
+ encoding_type,
+ TEXT_PLAIN_ENC,
)
- return [UNKNOWN_ENC]
+ return [TEXT_PLAIN_ENC]
def write_files(name, files):
@@ -327,7 +206,7 @@ def extract_contents(contents, extraction_types):
result = util.decomp_gzip(result, quiet=False, decode=False)
elif t == "application/base64":
result = base64.b64decode(result)
- elif t == UNKNOWN_ENC:
+ elif t == TEXT_PLAIN_ENC:
pass
return result
diff --git a/cloudinit/config/cc_write_files_deferred.py b/cloudinit/config/cc_write_files_deferred.py
index 1294628c..dbbe90f6 100644
--- a/cloudinit/config/cc_write_files_deferred.py
+++ b/cloudinit/config/cc_write_files_deferred.py
@@ -2,41 +2,38 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""Defer writing certain files"""
+"""Write Files Deferred: Defer writing certain files"""
from cloudinit import util
-from cloudinit.config.cc_write_files import DEFAULT_DEFER
-from cloudinit.config.cc_write_files import schema as write_files_schema
-from cloudinit.config.cc_write_files import write_files
-from cloudinit.config.schema import validate_cloudconfig_schema
-
-# meta is not used in this module, but it remains as code documentation
-#
-# id: cc_write_files_deferred'
-# name: 'Write Deferred Files
-# distros: ['all'],
-# frequency: PER_INSTANCE,
-# title:
-# write certain files, whose creation as been deferred, during
-# final stage
-# description:
-# This module is based on `'Write Files' <write-files>`__, and
-# will handle all files from the write_files list, that have been
-# marked as deferred and thus are not being processed by the
-# write-files module.
-#
-# *Please note that his module is not exposed to the user through
-# its own dedicated top-level directive.*
-
-schema = write_files_schema
-
-
-# Not exposed, because related modules should document this behaviour
-__doc__ = None
+from cloudinit.config.cc_write_files import DEFAULT_DEFER, write_files
+from cloudinit.config.schema import MetaSchema
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+
+MODULE_DESCRIPTION = """\
+This module is based on `'Write Files' <write-files>`__, and
+will handle all files from the write_files list, that have been
+marked as deferred and thus are not being processed by the
+write-files module.
+
+*Please note that this module is not exposed to the user through
+its own dedicated top-level directive.*
+"""
+meta: MetaSchema = {
+ "id": "cc_write_files_deferred",
+ "name": "Write Files Deferred",
+ "title": "Defer writing certain files",
+    "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [],
+}
+
+# This module is undocumented in our schema docs
+__doc__ = ""
def handle(name, cfg, _cloud, log, _args):
- validate_cloudconfig_schema(cfg, schema)
file_list = cfg.get("write_files", [])
filtered_files = [
f
@@ -51,6 +48,3 @@ def handle(name, cfg, _cloud, log, _args):
)
return
write_files(name, filtered_files)
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 7a232689..f7357192 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -4,38 +4,23 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Yum Add Repo
-------------
-**Summary:** add yum repository configuration to the system
-
-Add yum repository configuration to ``/etc/yum.repos.d``. Configuration files
-are named based on the dictionary key under the ``yum_repos`` they are
-specified with. If a config file already exists with the same name as a config
-entry, the config entry will be skipped.
-
-**Internal name:** ``cc_yum_add_repo``
-
-**Module frequency:** always
-
-**Supported distros:** almalinux, centos, cloudlinux, eurolinux, fedora,
- miraclelinux, openEuler, photon, rhel, rocky, virtuozzo
-
-**Config keys**::
-
- yum_repos:
- <repo-name>:
- baseurl: <repo url>
- name: <repo name>
- enabled: <true/false>
- # any repository configuration options (see man yum.conf)
-"""
+"Yum Add Repo: Add yum repository configuration to the system"
import io
import os
from configparser import ConfigParser
+from textwrap import dedent
from cloudinit import util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+
+MODULE_DESCRIPTION = """\
+Add yum repository configuration to ``/etc/yum.repos.d``. Configuration files
+are named based on the opaque dictionary key under the ``yum_repos`` they are
+specified with. If a config file already exists with the same name as a config
+entry, the config entry will be skipped.
+"""
distros = [
"almalinux",
@@ -50,6 +35,87 @@ distros = [
"virtuozzo",
]
+COPR_BASEURL = (
+ "https://download.copr.fedorainfracloud.org/results/@cloud-init/"
+ "cloud-init-dev/epel-8-$basearch/"
+)
+COPR_GPG_URL = (
+ "https://download.copr.fedorainfracloud.org/results/@cloud-init/"
+ "cloud-init-dev/pubkey.gpg"
+)
+EPEL_TESTING_BASEURL = (
+    "https://download.fedoraproject.org/pub/epel/"
+    "testing/5/$basearch"
+)
+
+meta: MetaSchema = {
+ "id": "cc_yum_add_repo",
+ "name": "Yum Add Repo",
+ "title": "Add yum repository configuration to the system",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ yum_repos:
+ my_repo:
+ baseurl: http://blah.org/pub/epel/testing/5/$basearch/
+ yum_repo_dir: /store/custom/yum.repos.d
+ """
+ ),
+ dedent(
+ f"""\
+ # Enable cloud-init upstream's daily testing repo for EPEL 8 to
+ # install latest cloud-init from tip of `main` for testing.
+ yum_repos:
+ cloud-init-daily:
+ name: Copr repo for cloud-init-dev owned by @cloud-init
+ baseurl: {COPR_BASEURL}
+ type: rpm-md
+ skip_if_unavailable: true
+ gpgcheck: true
+ gpgkey: {COPR_GPG_URL}
+ enabled_metadata: 1
+ """
+ ),
+ dedent(
+ f"""\
+ # Add the file /etc/yum.repos.d/epel_testing.repo which can then
+ # subsequently be used by yum for later operations.
+ yum_repos:
+ # The name of the repository
+ epel-testing:
+ baseurl: {EPEL_TESTING_BASEURL}
+ enabled: false
+ failovermethod: priority
+ gpgcheck: true
+ gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL
+ name: Extra Packages for Enterprise Linux 5 - Testing
+ """
+ ),
+ dedent(
+ """\
+ # Any yum repo configuration can be passed directly into
+ # the repository file created. See: man yum.conf for supported
+ # config keys.
+ #
+ # Write /etc/yum.conf.d/my_package_stream.repo with gpgkey checks
+    # on the repo data of the repository enabled.
+ yum_repos:
+ my package stream:
+ baseurl: http://blah.org/pub/epel/testing/5/$basearch/
+ mirrorlist: http://some-url-to-list-of-baseurls
+ repo_gpgcheck: 1
+ enable_gpgcheck: true
+ gpgkey: https://url.to.ascii-armored-gpg-key
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
+
+__doc__ = get_meta_doc(meta)
+
def _canonicalize_id(repo_id):
repo_id = repo_id.lower().replace("-", "_")
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
index be444cce..9b682bc6 100644
--- a/cloudinit/config/cc_zypper_add_repo.py
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -3,7 +3,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""zypper_add_repo: Add zyper repositories to the system"""
+"""zypper_add_repo: Add zypper repositories to the system"""
import os
from textwrap import dedent
@@ -16,22 +16,25 @@ from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_ALWAYS
distros = ["opensuse", "sles"]
-
+MODULE_DESCRIPTION = """\
+Zypper behavior can be configured using the ``config`` key, which will modify
+``/etc/zypp/zypp.conf``. The configuration writer will only append the
+provided configuration options to the configuration file. Any duplicate
+options will be resolved by the way the zypp.conf INI file is parsed.
+
+.. note::
+ Setting ``configdir`` is not supported and will be skipped.
+
+The ``repos`` key may be used to add repositories to the system. Beyond the
+required ``id`` and ``baseurl`` attributes, no validation is performed
+on the ``repos`` entries. It is assumed the user is familiar with the
+zypper repository file format.
+"""
meta: MetaSchema = {
"id": "cc_zypper_add_repo",
- "name": "ZypperAddRepo",
+ "name": "Zypper Add Repo",
"title": "Configure zypper behavior and add zypper repositories",
- "description": dedent(
- """\
- Configure zypper behavior by modifying /etc/zypp/zypp.conf. The
- configuration writer is "dumb" and will simply append the provided
- configuration options to the configuration file. Option settings
- that may be duplicate will be resolved by the way the zypp.conf file
- is parsed. The file is in INI format.
- Add repositories to the system. No validation is performed on the
- repository file entries, it is assumed the user is familiar with
- the zypper repository file format."""
- ),
+ "description": MODULE_DESCRIPTION,
"distros": distros,
"examples": [
dedent(
@@ -60,53 +63,7 @@ meta: MetaSchema = {
"frequency": PER_ALWAYS,
}
-schema = {
- "type": "object",
- "properties": {
- "zypper": {
- "type": "object",
- "properties": {
- "repos": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "id": {
- "type": "string",
- "description": dedent(
- """\
- The unique id of the repo, used when
- writing
- /etc/zypp/repos.d/<id>.repo."""
- ),
- },
- "baseurl": {
- "type": "string",
- "format": "uri", # built-in format type
- "description": "The base repositoy URL",
- },
- },
- "required": ["id", "baseurl"],
- "additionalProperties": True,
- },
- "minItems": 1,
- },
- "config": {
- "type": "object",
- "description": dedent(
- """\
- Any supported zypo.conf key is written to
- /etc/zypp/zypp.conf'"""
- ),
- },
- },
- "minProperties": 1, # Either config or repo must be provided
- "additionalProperties": False, # only repos and config allowed
- }
- },
-}
-
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
+__doc__ = get_meta_doc(meta)
LOG = logging.getLogger(__name__)
diff --git a/cloudinit/config/cloud-init-schema.json b/cloudinit/config/cloud-init-schema.json
deleted file mode 100644
index 2d43d06a..00000000
--- a/cloudinit/config/cloud-init-schema.json
+++ /dev/null
@@ -1,560 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-04/schema#",
- "$defs": {
- "apt_configure.mirror": {
- "type": "array",
- "items": {
- "type": "object",
- "additionalProperties": false,
- "required": ["arches"],
- "properties": {
- "arches": {
- "type": "array",
- "items": {"type": "string"},
- "minItems": 1
- },
- "uri": {"type": "string", "format": "uri"},
- "search": {
- "type": "array",
- "items": {"type": "string", "format": "uri"},
- "minItems": 1
- },
- "search_dns": {
- "type": "boolean"
- },
- "keyid": {"type": "string"},
- "key": {"type": "string"},
- "keyserver": {"type": "string"}
- }
- },
- "minItems": 1
- },
- "ca_certs.properties": {
- "type": "object",
- "properties": {
- "remove-defaults": {
- "description": "Deprecated key name. Use remove_defaults instead.",
- "type": "boolean",
- "default": false
- },
- "remove_defaults": {
- "description": "Remove default CA certificates if true. Default: false",
- "type": "boolean",
- "default": false
- },
- "trusted": {
- "description": "List of trusted CA certificates to add.",
- "type": "array",
- "items": {"type": "string"},
- "minItems": 1
- }
- },
- "additionalProperties": false,
- "minProperties": 1
- },
- "cc_apk_configure": {
- "type": "object",
- "properties": {
- "apk_repos": {
- "type": "object",
- "properties": {
- "preserve_repositories": {
- "type": "boolean",
- "default": false,
- "description": "By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` based on any valid configuration settings specified within a apk_repos section of cloud config. To disable this behavior and preserve the repositories file from the pristine image, set ``preserve_repositories`` to ``true``.\n\n The ``preserve_repositories`` option overrides all other config keys that would alter ``/etc/apk/repositories``."
- },
- "alpine_repo": {
- "type": ["object", "null"],
- "properties": {
- "base_url": {
- "type": "string",
- "default": "https://alpine.global.ssl.fastly.net/alpine",
- "description": "The base URL of an Alpine repository, or mirror, to download official packages from. If not specified then it defaults to ``https://alpine.global.ssl.fastly.net/alpine``"
- },
- "community_enabled": {
- "type": "boolean",
- "default": false,
- "description": "Whether to add the Community repo to the repositories file. By default the Community repo is not included."
- },
- "testing_enabled": {
- "type": "boolean",
- "default": false,
- "description": "Whether to add the Testing repo to the repositories file. By default the Testing repo is not included. It is only recommended to use the Testing repo on a machine running the ``Edge`` version of Alpine as packages installed from Testing may have dependencies that conflict with those in non-Edge Main or Community repos."
- },
- "version": {
- "type": "string",
- "description": "The Alpine version to use (e.g. ``v3.12`` or ``edge``)"
- }
- },
- "required": ["version"],
- "minProperties": 1,
- "additionalProperties": false
- },
- "local_repo_base_url": {
- "type": "string",
- "description": "The base URL of an Alpine repository containing unofficial packages"
- }
- },
- "minProperties": 1,
- "additionalProperties": false
- }
- }
- },
- "cc_apt_configure": {
- "properties": {
- "apt": {
- "type": "object",
- "additionalProperties": false,
- "minProperties": 1,
- "properties": {
- "preserve_sources_list": {
- "type": "boolean",
- "default": false,
- "description": "By default, cloud-init will generate a new sources list in ``/etc/apt/sources.list.d`` based on any changes specified in cloud config. To disable this behavior and preserve the sources list from the pristine image, set ``preserve_sources_list`` to ``true``.\n\nThe ``preserve_sources_list`` option overrides all other config keys that would alter ``sources.list`` or ``sources.list.d``, **except** for additional sources to be added to ``sources.list.d``."
- },
- "disable_suites": {
- "type": "array",
- "items": {"type": "string"},
- "minItems": 1,
- "uniqueItems": true,
- "description": "Entries in the sources list can be disabled using ``disable_suites``, which takes a list of suites to be disabled. If the string ``$RELEASE`` is present in a suite in the ``disable_suites`` list, it will be replaced with the release name. If a suite specified in ``disable_suites`` is not present in ``sources.list`` it will be ignored. For convenience, several aliases are provided for`` disable_suites``:\n\n - ``updates`` => ``$RELEASE-updates``\n - ``backports`` => ``$RELEASE-backports``\n - ``security`` => ``$RELEASE-security``\n - ``proposed`` => ``$RELEASE-proposed``\n - ``release`` => ``$RELEASE``.\n\nWhen a suite is disabled using ``disable_suites``, its entry in ``sources.list`` is not deleted; it is just commented out."
- },
- "primary": {
- "$ref": "#/$defs/apt_configure.mirror",
- "description": "The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys take a list of configs, allowing mirrors to be specified on a per-architecture basis. Each config is a dictionary which must have an entry for ``arches``, specifying which architectures that config entry is for. The keyword ``default`` applies to any architecture not explicitly listed. The mirror url can be specified with the ``uri`` key, or a list of mirrors to check can be provided in order, with the first mirror that can be resolved being selected. This allows the same configuration to be used in different environment, with different hosts used for a local APT mirror. If no mirror is provided by ``uri`` or ``search``, ``search_dns`` may be used to search for dns names in the format ``<distro>-mirror`` in each of the following:\n\n - fqdn of this host per cloud metadata,\n - localdomain,\n - domains listed in ``/etc/resolv.conf``.\n\nIf there is a dns entry for ``<distro>-mirror``, then it is assumed that there is a distro mirror at ``http://<distro>-mirror.<domain>/<distro>``. If the ``primary`` key is defined, but not the ``security`` key, then then configuration for ``primary`` is also used for ``security``. If ``search_dns`` is used for the ``security`` key, the search pattern will be ``<distro>-security-mirror``.\n\nEach mirror may also specify a key to import via any of the following optional keys:\n\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n\nIf no mirrors are specified, or all lookups fail, then default mirrors defined in the datasource are used. If none are present in the datasource either the following defaults are used:\n\n - ``primary`` => ``http://archive.ubuntu.com/ubuntu``.\n - ``security`` => ``http://security.ubuntu.com/ubuntu``"
- },
- "security": {
- "$ref": "#/$defs/apt_configure.mirror",
- "description": "Please refer to the primary config documentation"
- },
- "add_apt_repo_match": {
- "type": "string",
- "default": "^[\\w-]+:\\w",
- "description": "All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults to ``^[\\w-]+:\\w``"
- },
- "debconf_selections": {
- "type": "object",
- "minProperties": 1,
- "patternProperties": {
- "^.+$": {
- "type": "string"
- }
- },
- "description": "Debconf additional configurations can be specified as a dictionary under the ``debconf_selections`` config key, with each key in the dict representing a different set of configurations. The value of each key must be a string containing all the debconf configurations that must be applied. We will bundle all of the values and pass them to ``debconf-set-selections``. Therefore, each value line must be a valid entry for ``debconf-set-selections``, meaning that they must possess for distinct fields:\n\n``pkgname question type answer``\n\nWhere:\n\n - ``pkgname`` is the name of the package.\n - ``question`` the name of the questions.\n - ``type`` is the type of question.\n - ``answer`` is the value used to answer the question.\n\nFor example: ``ippackage ippackage/ip string 127.0.01``"
- },
- "sources_list": {
- "type": "string",
- "description": "Specifies a custom template for rendering ``sources.list`` . If no ``sources_list`` template is given, cloud-init will use sane default. Within this template, the following strings will be replaced with the appropriate values:\n\n - ``$MIRROR``\n - ``$RELEASE``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$KEY_FILE``"
- },
- "conf": {
- "type": "string",
- "description": "Specify configuration for apt, such as proxy configuration. This configuration is specified as a string. For multiline APT configuration, make sure to follow yaml syntax."
- },
- "https_proxy": {
- "type": "string",
- "description": "More convenient way to specify https APT proxy. https proxy url is specified in the format ``https://[[user][:pass]@]host[:port]/``."
- },
- "http_proxy": {
- "type": "string",
- "description": "More convenient way to specify http APT proxy. http proxy url is specified in the format ``http://[[user][:pass]@]host[:port]/``."
- },
- "proxy": {
- "type": "string",
- "description": "Alias for defining a http APT proxy."
- },
- "ftp_proxy": {
- "type": "string",
- "description": "More convenient way to specify ftp APT proxy. ftp proxy url is specified in the format ``ftp://[[user][:pass]@]host[:port]/``."
- },
- "sources": {
- "type": "object",
- "patternProperties": {
- "^.+$": {
- "type": "object",
- "properties": {
- "source": {
- "type": "string"
- },
- "keyid": {
- "type": "string"
- },
- "key": {
- "type": "string"
- },
- "keyserver": {
- "type": "string"
- },
- "filename": {
- "type": "string"
- }
- },
- "additionalProperties": false,
- "minProperties": 1
- }
- },
- "description": "Source list entries can be specified as a dictionary under the ``sources`` config key, with each key in the dict representing a different source file. The key of each source entry will be used as an id that can be referenced in other config entries, as well as the filename for the source's configuration under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``, it will be appended. If there is no configuration for a key in ``sources``, no file will be written, but the key may still be referred to as an id in other ``sources`` entries.\n\nEach entry under ``sources`` is a dictionary which may contain any of the following optional keys:\n - ``source``: a sources.list entry (some variable replacements apply).\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n - ``filename``: specify the name of the list file\n\nThe ``source`` key supports variable replacements for the following strings:\n\n - ``$MIRROR``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$RELEASE``\n - ``$KEY_FILE``"
- }
- }
- }
- }
- },
- "cc_apt_pipelining": {
- "type": "object",
- "properties": {
- "apt_pipelining": {
- "oneOf": [
- {"type": "integer"},
- {"type": "boolean"},
- {"type": "string", "enum": ["none", "unchanged", "os"]}
- ]
- }
- }
- },
- "cc_bootcmd": {
- "type": "object",
- "properties": {
- "bootcmd": {
- "type": "array",
- "items": {
- "oneOf": [
- {"type": "array", "items": {"type": "string"}},
- {"type": "string"}
- ]
- },
- "additionalItems": false,
- "minItems": 1
- }
- }
- },
- "cc_byobu": {
- "type": "object",
- "properties": {
- "byobu_by_default": {
- "type": "string",
- "enum": [
- "enable-system",
- "enable-user",
- "disable-system",
- "disable-user",
- "enable",
- "disable",
- "user",
- "system"
- ]
- }
- }
- },
- "cc_ca_certs": {
- "type": "object",
- "properties": {
- "ca_certs": {
- "$ref": "#/$defs/ca_certs.properties"
- },
- "ca-certs": {
- "$ref": "#/$defs/ca_certs.properties"
- }
- }
- },
- "cc_chef": {
- "type": "object",
- "properties": {
- "chef": {
- "type": "object",
- "additionalProperties": false,
- "minProperties": 1,
- "properties": {
- "directories": {
- "type": "array",
- "items": {"type": "string"},
- "minItems": 1,
- "uniqueItems": true,
- "description": "Create the necessary directories for chef to run. By default, it creates the following directories:\n\n - ``/etc/chef``\n - ``/var/log/chef``\n - ``/var/lib/chef``\n - ``/var/cache/chef``\n - ``/var/backups/chef``\n - ``/var/run/chef``"
- },
- "validation_cert": {
- "type": "string",
- "description": "Optional string to be written to file validation_key. Special value ``system`` means set use existing file."
- },
- "validation_key": {
- "type": "string",
- "default": "/etc/chef/validation.pem",
- "description": "Optional path for validation_cert. default to ``/etc/chef/validation.pem``"
- },
- "firstboot_path": {
- "type": "string",
- "default": "/etc/chef/firstboot.json",
- "description": "Path to write run_list and initial_attributes keys that should also be present in this configuration, defaults to ``/etc/chef/firstboot.json``"
- },
- "exec": {
- "type": "boolean",
- "default": false,
- "description": "Set true if we should run or not run chef (defaults to false, unless a gem installed is requested where this will then default to true)."
- },
- "client_key": {
- "type": "string",
- "default": "/etc/chef/client.pem",
- "description": "Optional path for client_cert. Default to ``/etc/chef/client.pem``."
- },
- "encrypted_data_bag_secret": {
- "type": "string",
- "default": null,
- "description": "Specifies the location of the secret key used by chef to encrypt data items. By default, this path is set to null, meaning that chef will have to look at the path ``/etc/chef/encrypted_data_bag_secret`` for it."
- },
- "environment": {
- "type": "string",
- "default": "_default",
- "description": "Specifies which environment chef will use. By default, it will use the ``_default`` configuration."
- },
- "file_backup_path": {
- "type": "string",
- "default": "/var/backups/chef",
- "description": "Specifies the location in which backup files are stored. By default, it uses the ``/var/backups/chef`` location."
- },
- "file_cache_path": {
- "type": "string",
- "default": "/var/cache/chef",
- "description": "Specifies the location in which chef cache files will be saved. By default, it uses the ``/var/cache/chef`` location."
- },
- "json_attribs": {
- "type": "string",
- "default": "/etc/chef/firstboot.json",
- "description": "Specifies the location in which some chef json data is stored. By default, it uses the ``/etc/chef/firstboot.json`` location."
- },
- "log_level": {
- "type": "string",
- "default": ":info",
- "description": "Defines the level of logging to be stored in the log file. By default this value is set to ``:info``."
- },
- "log_location": {
- "type": "string",
- "default": "/var/log/chef/client.log",
- "description": "Specifies the location of the chef lof file. By default, the location is specified at ``/var/log/chef/client.log``."
- },
- "node_name": {
- "type": "string",
- "description": "The name of the node to run. By default, we will use th instance id as the node name."
- },
- "omnibus_url": {
- "type": "string",
- "default": "https://www.chef.io/chef/install.sh",
- "description": "Omnibus URL if chef should be installed through Omnibus. By default, it uses the ``https://www.chef.io/chef/install.sh``."
- },
- "omnibus_url_retries": {
- "type": "integer",
- "default": 5,
- "description": "The number of retries that will be attempted to reach the Omnibus URL. Default is 5."
- },
- "omnibus_version": {
- "type": "string",
- "description": "Optional version string to require for omnibus install."
- },
- "pid_file": {
- "type": "string",
- "default": "/var/run/chef/client.pid",
- "description": "The location in which a process identification number (pid) is saved. By default, it saves in the ``/var/run/chef/client.pid`` location."
- },
- "server_url": {
- "type": "string",
- "description": "The URL for the chef server"
- },
- "show_time": {
- "type": "boolean",
- "default": true,
- "description": "Show time in chef logs"
- },
- "ssl_verify_mode": {
- "type": "string",
- "default": ":verify_none",
- "description": "Set the verify mode for HTTPS requests. We can have two possible values for this parameter:\n\n - ``:verify_none``: No validation of SSL certificates.\n - ``:verify_peer``: Validate all SSL certificates.\n\nBy default, the parameter is set as ``:verify_none``."
- },
- "validation_name": {
- "type": "string",
- "description": "The name of the chef-validator key that Chef Infra Client uses to access the Chef Infra Server during the initial Chef Infra Client run."
- },
- "force_install": {
- "type": "boolean",
- "default": false,
- "description": "If set to ``true``, forces chef installation, even if it is already installed."
- },
- "initial_attributes": {
- "type": "object",
- "items": {"type": "string"},
- "description": "Specify a list of initial attributes used by the cookbooks."
- },
- "install_type": {
- "type": "string",
- "default": "packages",
- "enum": [
- "packages",
- "gems",
- "omnibus"
- ],
- "description": "The type of installation for chef. It can be one of the following values:\n\n - ``packages``\n - ``gems``\n - ``omnibus``"
- },
- "run_list": {
- "type": "array",
- "items": {"type": "string"},
- "description": "A run list for a first boot json."
- },
- "chef_license": {
- "type": "string",
- "description": "string that indicates if user accepts or not license related to some of chef products"
- }
- }
- }
- }
- },
- "cc_debug": {
- "type": "object",
- "properties": {
- "debug": {
- "additionalProperties": false,
- "minProperties": 1,
- "type": "object",
- "properties": {
- "verbose": {
- "description": "Should always be true for this module",
- "type": "boolean"
- },
- "output": {
- "description": "Location to write output. Defaults to console + log",
- "type": "string"
- }
- }
- }
- }
- },
- "cc_disable_ec2_metadata": {
- "type": "object",
- "properties": {
- "disable_ec2_metadata": {
- "default": false,
- "description": "Set true to disable IPv4 routes to EC2 metadata. Default: false.",
- "type": "boolean"
- }
- }
- },
- "cc_disk_setup": {
- "type": "object",
- "properties": {
- "device_aliases": {
- "type": "object",
- "patternProperties": {
- "^.+$": {
- "label": "<alias_name>",
- "type": "string",
- "description": "Path to disk to be aliased by this name."
- }
- }
- },
- "disk_setup": {
- "type": "object",
- "patternProperties": {
- "^.+$": {
- "label": "<alias name/path>",
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "table_type": {
- "type": "string",
- "default": "mbr",
- "enum": ["mbr", "gpt"],
- "description": "Specifies the partition table type, either ``mbr`` or ``gpt``. Default: ``mbr``."
- },
- "layout": {
- "type": ["string", "boolean", "array"],
- "default": false,
- "oneOf": [
- {"type": "string", "enum": ["remove"]},
- {"type": "boolean"},
- {
- "type": "array",
- "items": {
- "oneOf": [
- {"type": "integer"},
- {
- "type": "array",
- "items": {"type": "integer"},
- "minItems": 2,
- "maxItems": 2
- }
- ]
- }
- }
- ],
- "description": "If set to ``true``, a single partition using all the space on the device will be created. If set to ``false``, no partitions will be created. If set to ``remove``, any existing partition table will be purged. Partitions can be specified by providing a list to ``layout``, where each entry in the list is either a size or a list containing a size and the numerical value for a partition type. The size for partitions is specified in **percentage** of disk space, not in bytes (e.g. a size of 33 would take up 1/3 of the disk space). Default: ``false``."
- },
- "overwrite": {
- "type": "boolean",
- "default": false,
- "description": "Controls whether this module tries to be safe about writing partition tables or not. If ``overwrite: false`` is set, the device will be checked for a partition table and for a file system and if either is found, the operation will be skipped. If ``overwrite: true`` is set, no checks will be performed. Using ``overwrite: true`` is **dangerous** and can lead to data loss, so double check that the correct device has been specified if using this option. Default: ``false``"
- }
- }
- }
- }
- },
- "fs_setup": {
- "type": "array",
- "items": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "label": {
- "type": "string",
- "description": "Label for the filesystem."
- },
- "filesystem": {
- "type": "string",
- "description": "Filesystem type to create. E.g., ``ext4`` or ``btrfs``"
- },
- "device": {
- "type": "string",
- "description": "Specified either as a path or as an alias in the format ``<alias name>.<y>`` where ``<y>`` denotes the partition number on the device. If specifying device using the ``<device name>.<partition number>`` format, the value of ``partition`` will be overwritten."
- },
- "partition": {
- "type": ["string", "integer"],
- "oneOf": [
- {
- "type": "string",
- "enum": ["auto", "any", "none"]
- },
- {"type": "integer"}
- ],
- "description": "The partition can be specified by setting ``partition`` to the desired partition number. The ``partition`` option may also be set to ``auto``, in which this module will search for the existence of a filesystem matching the ``label``, ``type`` and ``device`` of the ``fs_setup`` entry and will skip creating the filesystem if one is found. The ``partition`` option may also be set to ``any``, in which case any file system that matches ``type`` and ``device`` will cause this module to skip filesystem creation for the ``fs_setup`` entry, regardless of ``label`` matching or not. To write a filesystem directly to a device, use ``partition: none``. ``partition: none`` will **always** write the filesystem, even when the ``label`` and ``filesystem`` are matched, and ``overwrite`` is ``false``."
- },
- "overwrite": {
- "type": "boolean",
- "description": "If ``true``, overwrite any existing filesystem. Using ``overwrite: true`` for filesystems is **dangerous** and can lead to data loss, so double check the entry in ``fs_setup``. Default: ``false``"
- },
- "replace_fs": {
- "type": "string",
- "description": "Ignored unless ``partition`` is ``auto`` or ``any``. Default ``false``."
- },
- "extra_opts": {
- "type": ["array", "string"],
- "items": {"type": "string"},
- "description": "Optional options to pass to the filesystem creation command. Ignored if you using ``cmd`` directly."
- },
- "cmd": {
- "type": ["array", "string"],
- "items": {"type": "string"},
- "description": "Optional command to run to create the filesystem. Can include string substitutions of the other ``fs_setup`` config keys. This is only necessary if you need to override the default command."
- }
- }
- }
- }
- }
- }
- },
- "allOf": [
- { "$ref": "#/$defs/cc_apk_configure" },
- { "$ref": "#/$defs/cc_apt_configure" },
- { "$ref": "#/$defs/cc_apt_pipelining" },
- { "$ref": "#/$defs/cc_bootcmd" },
- { "$ref": "#/$defs/cc_byobu" },
- { "$ref": "#/$defs/cc_ca_certs" },
- { "$ref": "#/$defs/cc_chef" },
- { "$ref": "#/$defs/cc_debug" },
- { "$ref": "#/$defs/cc_disable_ec2_metadata" },
- { "$ref": "#/$defs/cc_disk_setup" }
- ]
-}
diff --git a/cloudinit/config/modules.py b/cloudinit/config/modules.py
new file mode 100644
index 00000000..efb7a5a4
--- /dev/null
+++ b/cloudinit/config/modules.py
@@ -0,0 +1,302 @@
+# Copyright (C) 2008-2022 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Chuck Short <chuck.short@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+from collections import namedtuple
+from typing import List
+
+from cloudinit import config, importer
+from cloudinit import log as logging
+from cloudinit import type_utils, util
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.helpers import ConfigMerger
+from cloudinit.reporting.events import ReportEventStack
+from cloudinit.settings import FREQUENCIES
+from cloudinit.stages import Init
+
+LOG = logging.getLogger(__name__)
+
+# This prefix is used to reduce the
+# chance that, when importing, we
+# will find something else with the
+# same name in the lookup path...
+MOD_PREFIX = "cc_"
+ModuleDetails = namedtuple(
+ "ModuleDetails", ["module", "name", "frequency", "run_args"]
+)
+
+
+def form_module_name(name):
+ canon_name = name.replace("-", "_")
+ if canon_name.lower().endswith(".py"):
+ canon_name = canon_name[0 : (len(canon_name) - 3)]
+ canon_name = canon_name.strip()
+ if not canon_name:
+ return None
+ if not canon_name.startswith(MOD_PREFIX):
+ canon_name = "%s%s" % (MOD_PREFIX, canon_name)
+ return canon_name
+
+
+def validate_module(mod, name):
+ if (
+ not hasattr(mod, "meta")
+ or "frequency" not in mod.meta
+ or "distros" not in mod.meta
+ ):
+ raise ValueError(
+ f"Module '{mod}' with name '{name}' MUST have a 'meta' attribute "
+ "of type 'MetaSchema'."
+ )
+ if mod.meta["frequency"] not in FREQUENCIES:
+ raise ValueError(
+ f"Module '{mod}' with name '{name}' has an invalid frequency "
+ f"{mod.meta['frequency']}."
+ )
+ if hasattr(mod, "schema"):
+ raise ValueError(
+ f"Module '{mod}' with name '{name}' has a JSON 'schema' attribute "
+ "defined. Please define schema in cloud-init-schema,json."
+ )
+
+
+class Modules(object):
+ def __init__(self, init: Init, cfg_files=None, reporter=None):
+ self.init = init
+ self.cfg_files = cfg_files
+ # Created on first use
+ self._cached_cfg = None
+ if reporter is None:
+ reporter = ReportEventStack(
+ name="module-reporter",
+ description="module-desc",
+ reporting_enabled=False,
+ )
+ self.reporter = reporter
+
+ @property
+ def cfg(self):
+ # None check to avoid empty case causing re-reading
+ if self._cached_cfg is None:
+ merger = ConfigMerger(
+ paths=self.init.paths,
+ datasource=self.init.datasource,
+ additional_fns=self.cfg_files,
+ base_cfg=self.init.cfg,
+ )
+ self._cached_cfg = merger.cfg
+ # Only give out a copy so that others can't modify this...
+ return copy.deepcopy(self._cached_cfg)
+
+ def _read_modules(self, name):
+ """Read the modules from the config file given the specified name.
+
+ Returns a list of module definitions. E.g.,
+ [
+ {
+ "mod": "bootcmd",
+ "freq": "always"
+ "args": "some_arg",
+ }
+ ]
+
+ Note that in the default case, only "mod" will be set.
+ """
+ module_list = []
+ if name not in self.cfg:
+ return module_list
+ cfg_mods = self.cfg.get(name)
+ if not cfg_mods:
+ return module_list
+ for item in cfg_mods:
+ if not item:
+ continue
+ if isinstance(item, str):
+ module_list.append(
+ {
+ "mod": item.strip(),
+ }
+ )
+ elif isinstance(item, (list)):
+ contents = {}
+ # Meant to fall through...
+ if len(item) >= 1:
+ contents["mod"] = item[0].strip()
+ if len(item) >= 2:
+ contents["freq"] = item[1].strip()
+ if len(item) >= 3:
+ contents["args"] = item[2:]
+ if contents:
+ module_list.append(contents)
+ elif isinstance(item, (dict)):
+ contents = {}
+ valid = False
+ if "name" in item:
+ contents["mod"] = item["name"].strip()
+ valid = True
+ if "frequency" in item:
+ contents["freq"] = item["frequency"].strip()
+ if "args" in item:
+ contents["args"] = item["args"] or []
+ if contents and valid:
+ module_list.append(contents)
+ else:
+ raise TypeError(
+ "Failed to read '%s' item in config, unknown type %s"
+ % (item, type_utils.obj_name(item))
+ )
+ return module_list
+
+ def _fixup_modules(self, raw_mods) -> List[ModuleDetails]:
+ """Convert list of returned from _read_modules() into new format.
+
+        Invalid modules and arguments are ignored.
+ Also ensures that the module has the required meta fields.
+ """
+ mostly_mods = []
+ for raw_mod in raw_mods:
+ raw_name = raw_mod["mod"]
+ freq = raw_mod.get("freq")
+ run_args = raw_mod.get("args") or []
+ mod_name = form_module_name(raw_name)
+ if not mod_name:
+ continue
+ if freq and freq not in FREQUENCIES:
+ LOG.warning(
+ "Config specified module %s has an unknown frequency %s",
+ raw_name,
+ freq,
+ )
+ # Misconfigured in /etc/cloud/cloud.cfg. Reset so cc_* module
+ # default meta attribute "frequency" value is used.
+ freq = None
+ mod_locs, looked_locs = importer.find_module(
+ mod_name, ["", type_utils.obj_name(config)], ["handle"]
+ )
+ if not mod_locs:
+ LOG.warning(
+ "Could not find module named %s (searched %s)",
+ mod_name,
+ looked_locs,
+ )
+ continue
+ mod = importer.import_module(mod_locs[0])
+ validate_module(mod, raw_name)
+ if freq is None:
+ # Use cc_* module default setting since no cloud.cfg overrides
+ freq = mod.meta["frequency"]
+ mostly_mods.append(
+ ModuleDetails(
+ module=mod,
+ name=raw_name,
+ frequency=freq,
+ run_args=run_args,
+ )
+ )
+ return mostly_mods
+
+ def _run_modules(self, mostly_mods: List[ModuleDetails]):
+ cc = self.init.cloudify()
+ # Return which ones ran
+ # and which ones failed + the exception of why it failed
+ failures = []
+ which_ran = []
+ for (mod, name, freq, args) in mostly_mods:
+ try:
+ LOG.debug(
+ "Running module %s (%s) with frequency %s", name, mod, freq
+ )
+
+ # Use the configs logger and not our own
+ # TODO(harlowja): possibly check the module
+ # for having a LOG attr and just give it back
+ # its own logger?
+ func_args = [name, self.cfg, cc, LOG, args]
+ # Mark it as having started running
+ which_ran.append(name)
+ # This name will affect the semaphore name created
+ run_name = f"config-{name}"
+
+ desc = "running %s with frequency %s" % (run_name, freq)
+ myrep = ReportEventStack(
+ name=run_name, description=desc, parent=self.reporter
+ )
+
+ with myrep:
+ ran, _r = cc.run(
+ run_name, mod.handle, func_args, freq=freq
+ )
+ if ran:
+ myrep.message = "%s ran successfully" % run_name
+ else:
+ myrep.message = "%s previously ran" % run_name
+
+ except Exception as e:
+ util.logexc(LOG, "Running module %s (%s) failed", name, mod)
+ failures.append((name, e))
+ return (which_ran, failures)
+
+ def run_single(self, mod_name, args=None, freq=None):
+ # Form the users module 'specs'
+ mod_to_be = {
+ "mod": mod_name,
+ "args": args,
+ "freq": freq,
+ }
+ # Now resume doing the normal fixups and running
+ raw_mods = [mod_to_be]
+ mostly_mods = self._fixup_modules(raw_mods)
+ return self._run_modules(mostly_mods)
+
+ def run_section(self, section_name):
+ """Runs all modules in the given section.
+
+ section_name - One of the modules lists as defined in
+ /etc/cloud/cloud.cfg. One of:
+ - cloud_init_modules
+ - cloud_config_modules
+ - cloud_final_modules
+ """
+ raw_mods = self._read_modules(section_name)
+ mostly_mods = self._fixup_modules(raw_mods)
+ distro_name = self.init.distro.name
+
+ skipped = []
+ forced = []
+ overridden = self.cfg.get("unverified_modules", [])
+ active_mods = []
+ for (mod, name, _freq, _args) in mostly_mods:
+ if mod is None:
+ continue
+ worked_distros = mod.meta["distros"]
+
+ # Skip only when the following conditions are all met:
+ # - distros are defined in the module != ALL_DISTROS
+ # - the current d_name isn't in distros
+ # - and the module is unverified and not in the unverified_modules
+ # override list
+ if worked_distros and worked_distros != [ALL_DISTROS]:
+ if distro_name not in worked_distros:
+ if name not in overridden:
+ skipped.append(name)
+ continue
+ forced.append(name)
+ active_mods.append([mod, name, _freq, _args])
+
+ if skipped:
+ LOG.info(
+ "Skipping modules '%s' because they are not verified "
+ "on distro '%s'. To run anyway, add them to "
+ "'unverified_modules' in config.",
+ ",".join(skipped),
+ distro_name,
+ )
+ if forced:
+ LOG.info("running unverified_modules: '%s'", ", ".join(forced))
+
+ return self._run_modules(active_mods)
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 1f969c97..7a6ecf08 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -7,20 +7,24 @@ import logging
import os
import re
import sys
+import typing
from collections import defaultdict
from copy import deepcopy
from functools import partial
import yaml
-from cloudinit import importer
+from cloudinit import importer, safeyaml
from cloudinit.cmd.devel import read_cfg_paths
-from cloudinit.importer import MetaSchema
from cloudinit.util import error, find_modules, load_file
error = partial(error, sys_exit=True)
LOG = logging.getLogger(__name__)
+VERSIONED_USERDATA_SCHEMA_FILE = "versions.schema.cloud-config.json"
+# Bump this file when introducing incompatible schema changes.
+# Also add new version definition to versions.schema.json.
+USERDATA_SCHEMA_FILE = "schema-cloud-config-v1.json"
_YAML_MAP = {True: "true", False: "false", None: "null"}
CLOUD_CONFIG_HEADER = b"#cloud-config"
SCHEMA_DOC_TMPL = """
@@ -36,18 +40,39 @@ SCHEMA_DOC_TMPL = """
**Supported distros:** {distros}
-**Config schema**:
+{property_header}
{property_doc}
+
{examples}
"""
-SCHEMA_PROPERTY_TMPL = "{prefix}**{prop_name}:** ({prop_type}) {description}"
+SCHEMA_PROPERTY_HEADER = "**Config schema**:"
+SCHEMA_PROPERTY_TMPL = "{prefix}**{prop_name}:** ({prop_type}){description}"
SCHEMA_LIST_ITEM_TMPL = (
- "{prefix}Each item in **{prop_name}** list supports the following keys:"
+ "{prefix}Each object in **{prop_name}** list supports the following keys:"
)
-SCHEMA_EXAMPLES_HEADER = "\n**Examples**::\n\n"
+SCHEMA_EXAMPLES_HEADER = "**Examples**::\n\n"
SCHEMA_EXAMPLES_SPACER_TEMPLATE = "\n # --- Example{0} ---"
+# annotations add value for development, but don't break old versions
+# pyver: 3.6 -> 3.8
+# pylint: disable=E1101
+if sys.version_info >= (3, 8):
+
+ class MetaSchema(typing.TypedDict):
+ name: str
+ id: str
+ title: str
+ description: str
+ distros: typing.List[str]
+ examples: typing.List[str]
+ frequency: str
+
+else:
+ MetaSchema = dict
+# pylint: enable=E1101
+
+
class SchemaValidationError(ValueError):
"""Raised when validating a cloud-config file against a schema."""
@@ -215,7 +240,9 @@ def validate_cloudconfig_schema(
)
-def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
+def annotated_cloudconfig_file(
+ cloudconfig, original_content, schema_errors, schemamarks
+):
"""Return contents of the cloud-config file annotated with schema errors.
@param cloudconfig: YAML-loaded dict from the original_content or empty
@@ -226,7 +253,6 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
"""
if not schema_errors:
return original_content
- schemapaths = {}
errors_by_line = defaultdict(list)
error_footer = []
error_header = "# Errors: -------------\n{0}\n\n"
@@ -238,10 +264,6 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
lines
+ [error_header.format("# E1: Cloud-config is not a YAML dict.")]
)
- if cloudconfig:
- schemapaths = _schemapath_for_cloudconfig(
- cloudconfig, original_content
- )
for path, msg in schema_errors:
match = re.match(r"format-l(?P<line>\d+)\.c(?P<col>\d+).*", path)
if match:
@@ -249,7 +271,7 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
errors_by_line[int(line)].append(msg)
else:
col = None
- errors_by_line[schemapaths[path]].append(msg)
+ errors_by_line[schemamarks[path]].append(msg)
if col is not None:
msg = "Line {line} column {col}: {msg}".format(
line=line, col=col, msg=msg
@@ -310,10 +332,18 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
)
error = SchemaValidationError(errors)
if annotate:
- print(annotated_cloudconfig_file({}, content, error.schema_errors))
+ print(
+ annotated_cloudconfig_file(
+ {}, content, error.schema_errors, {}
+ )
+ )
raise error
try:
- cloudconfig = yaml.safe_load(content)
+ if annotate:
+ cloudconfig, marks = safeyaml.load_with_marks(content)
+ else:
+ cloudconfig = safeyaml.load(content)
+ marks = {}
except (yaml.YAMLError) as e:
line = column = 1
mark = None
@@ -332,7 +362,11 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
)
error = SchemaValidationError(errors)
if annotate:
- print(annotated_cloudconfig_file({}, content, error.schema_errors))
+ print(
+ annotated_cloudconfig_file(
+ {}, content, error.schema_errors, {}
+ )
+ )
raise error from e
if not isinstance(cloudconfig, dict):
# Return a meaningful message on empty cloud-config
@@ -344,103 +378,63 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
if annotate:
print(
annotated_cloudconfig_file(
- cloudconfig, content, e.schema_errors
+ cloudconfig, content, e.schema_errors, marks
)
)
raise
-def _schemapath_for_cloudconfig(config, original_content):
- """Return a dictionary mapping schemapath to original_content line number.
+def _sort_property_order(value):
+ """Provide a sorting weight for documentation of property types.
- @param config: The yaml.loaded config dictionary of a cloud-config file.
- @param original_content: The simple file content of the cloud-config file
+ Weight values ensure 'array' sorted after 'object' which is sorted
+ after anything else which remains unsorted.
"""
- # TODO( handle multi-line lists or multi-line strings, inline dicts)
- content_lines = original_content.decode().split("\n")
- schema_line_numbers = {}
- list_index = 0
- RE_YAML_INDENT = r"^(\s*)"
- scopes = []
- if not config:
- return {} # No YAML config dict, no schemapaths to annotate
- for line_number, line in enumerate(content_lines, 1):
- indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0])
- line = line.strip()
- if not line or line.startswith("#"):
- continue
- if scopes:
- previous_depth, path_prefix = scopes[-1]
- else:
- previous_depth = -1
- path_prefix = ""
- if line.startswith("- "):
- # Process list items adding a list_index to the path prefix
- previous_list_idx = ".%d" % (list_index - 1)
- if path_prefix and path_prefix.endswith(previous_list_idx):
- path_prefix = path_prefix[: -len(previous_list_idx)]
- key = str(list_index)
- item_indent = len(re.match(RE_YAML_INDENT, line[1:]).groups()[0])
- item_indent += 1 # For the leading '-' character
- previous_depth = indent_depth
- indent_depth += item_indent
- line = line[item_indent:] # Strip leading list item + whitespace
- list_index += 1
- else:
- # Process non-list lines setting value if present
- list_index = 0
- key, value = line.split(":", 1)
- if path_prefix and indent_depth > previous_depth:
- # Append any existing path_prefix for a fully-pathed key
- key = path_prefix + "." + key
- while indent_depth <= previous_depth:
- if scopes:
- previous_depth, path_prefix = scopes.pop()
- if list_index > 0 and indent_depth == previous_depth:
- path_prefix = ".".join(path_prefix.split(".")[:-1])
- break
- else:
- previous_depth = -1
- path_prefix = ""
- scopes.append((indent_depth, key))
- if value:
- value = value.strip()
- if value.startswith("["):
- scopes.append((indent_depth + 2, key + ".0"))
- for inner_list_index in range(0, len(yaml.safe_load(value))):
- list_key = key + "." + str(inner_list_index)
- schema_line_numbers[list_key] = line_number
- schema_line_numbers[key] = line_number
- return schema_line_numbers
-
-
-def _get_property_type(property_dict: dict) -> str:
+ if value == "array":
+ return 2
+ elif value == "object":
+ return 1
+ return 0
+
+
+def _get_property_type(property_dict: dict, defs: dict) -> str:
"""Return a string representing a property type from a given
jsonschema.
"""
- property_type = property_dict.get("type")
- if property_type is None:
- if property_dict.get("enum"):
- property_type = [
- str(_YAML_MAP.get(k, k)) for k in property_dict["enum"]
- ]
- elif property_dict.get("oneOf"):
- property_type = [
+ _flatten_schema_refs(property_dict, defs)
+ property_types = property_dict.get("type", [])
+ if not isinstance(property_types, list):
+ property_types = [property_types]
+ if property_dict.get("enum"):
+ property_types = [
+ f"``{_YAML_MAP.get(k, k)}``" for k in property_dict["enum"]
+ ]
+ elif property_dict.get("oneOf"):
+ property_types.extend(
+ [
subschema["type"]
for subschema in property_dict.get("oneOf")
if subschema.get("type")
]
- if isinstance(property_type, list):
- property_type = "/".join(property_type)
+ )
+ if len(property_types) == 1:
+ property_type = property_types[0]
+ else:
+ property_types.sort(key=_sort_property_order)
+ property_type = "/".join(property_types)
items = property_dict.get("items", {})
- sub_property_type = items.get("type", "")
+ sub_property_types = items.get("type", [])
+ if not isinstance(sub_property_types, list):
+ sub_property_types = [sub_property_types]
# Collect each item type
for sub_item in items.get("oneOf", {}):
- if sub_property_type:
- sub_property_type += "/"
- sub_property_type += "(" + _get_property_type(sub_item) + ")"
- if sub_property_type:
- return "{0} of {1}".format(property_type, sub_property_type)
+ sub_property_types.append(_get_property_type(sub_item, defs))
+ if sub_property_types:
+ if len(sub_property_types) == 1:
+ return f"{property_type} of {sub_property_types[0]}"
+ sub_property_types.sort(key=_sort_property_order)
+ sub_property_doc = f"({'/'.join(sub_property_types)})"
+ return f"{property_type} of {sub_property_doc}"
return property_type or "UNDEFINED"
@@ -466,23 +460,50 @@ def _parse_description(description, prefix) -> str:
return description
+def _flatten_schema_refs(src_cfg: dict, defs: dict):
+ """Flatten schema: replace $refs in src_cfg with definitions from $defs."""
+ if "$ref" in src_cfg:
+ reference = src_cfg.pop("$ref").replace("#/$defs/", "")
+ # Update the defined references in subschema for doc rendering
+ src_cfg.update(defs[reference])
+ if "items" in src_cfg:
+ if "$ref" in src_cfg["items"]:
+ reference = src_cfg["items"].pop("$ref").replace("#/$defs/", "")
+ # Update the references in subschema for doc rendering
+ src_cfg["items"].update(defs[reference])
+ if "oneOf" in src_cfg["items"]:
+ for alt_schema in src_cfg["items"]["oneOf"]:
+ if "$ref" in alt_schema:
+ reference = alt_schema.pop("$ref").replace("#/$defs/", "")
+ alt_schema.update(defs[reference])
+ for alt_schema in src_cfg.get("oneOf", []):
+ if "$ref" in alt_schema:
+ reference = alt_schema.pop("$ref").replace("#/$defs/", "")
+ alt_schema.update(defs[reference])
+
+
def _get_property_doc(schema: dict, defs: dict, prefix=" ") -> str:
"""Return restructured text describing the supported schema properties."""
new_prefix = prefix + " "
properties = []
+ if schema.get("hidden") is True:
+ return "" # no docs for this schema
property_keys = [
- schema.get("properties", {}),
- schema.get("patternProperties", {}),
+ key
+ for key in ("properties", "patternProperties")
+ if "hidden" not in schema or key not in schema["hidden"]
]
+ property_schemas = [schema.get(key, {}) for key in property_keys]
- for props in property_keys:
- for prop_key, prop_config in props.items():
- if "$ref" in prop_config:
- # Update the defined references in subschema for doc rendering
- ref = defs[prop_config["$ref"].replace("#/$defs/", "")]
- prop_config.update(ref)
+ for prop_schema in property_schemas:
+ for prop_key, prop_config in prop_schema.items():
+ _flatten_schema_refs(prop_config, defs)
+ if prop_config.get("hidden") is True:
+ continue # document nothing for this property
# Define prop_name and description for SCHEMA_PROPERTY_TMPL
description = prop_config.get("description", "")
+ if description:
+ description = " " + description
# Define prop_name and description for SCHEMA_PROPERTY_TMPL
label = prop_config.get("label", prop_key)
@@ -491,21 +512,13 @@ def _get_property_doc(schema: dict, defs: dict, prefix=" ") -> str:
prefix=prefix,
prop_name=label,
description=_parse_description(description, prefix),
- prop_type=_get_property_type(prop_config),
+ prop_type=_get_property_type(prop_config, defs),
)
)
items = prop_config.get("items")
if items:
- if isinstance(items, list):
- for item in items:
- properties.append(
- _get_property_doc(
- item, defs=defs, prefix=new_prefix
- )
- )
- elif isinstance(items, dict) and (
- items.get("properties") or items.get("patternProperties")
- ):
+ _flatten_schema_refs(items, defs)
+ if items.get("properties") or items.get("patternProperties"):
properties.append(
SCHEMA_LIST_ITEM_TMPL.format(
prefix=new_prefix, prop_name=label
@@ -515,6 +528,21 @@ def _get_property_doc(schema: dict, defs: dict, prefix=" ") -> str:
properties.append(
_get_property_doc(items, defs=defs, prefix=new_prefix)
)
+ for alt_schema in items.get("oneOf", []):
+ if alt_schema.get("properties") or alt_schema.get(
+ "patternProperties"
+ ):
+ properties.append(
+ SCHEMA_LIST_ITEM_TMPL.format(
+ prefix=new_prefix, prop_name=label
+ )
+ )
+ new_prefix += " "
+ properties.append(
+ _get_property_doc(
+ alt_schema, defs=defs, prefix=new_prefix
+ )
+ )
if (
"properties" in prop_config
or "patternProperties" in prop_config
@@ -585,6 +613,7 @@ def get_meta_doc(meta: MetaSchema, schema: dict = None) -> str:
# cast away type annotation
meta_copy = dict(deepcopy(meta))
+ meta_copy["property_header"] = ""
defs = schema.get("$defs", {})
if defs.get(meta["id"]):
schema = defs.get(meta["id"])
@@ -593,6 +622,8 @@ def get_meta_doc(meta: MetaSchema, schema: dict = None) -> str:
except AttributeError:
LOG.warning("Unable to render property_doc due to invalid schema")
meta_copy["property_doc"] = ""
+ if meta_copy["property_doc"]:
+ meta_copy["property_header"] = SCHEMA_PROPERTY_HEADER
meta_copy["examples"] = _get_examples(meta)
meta_copy["distros"] = ", ".join(meta["distros"])
# Need an underbar of the same length as the name
@@ -632,11 +663,22 @@ def load_doc(requested_modules: list) -> str:
return docs
+def get_schema_dir() -> str:
+ return os.path.join(os.path.dirname(os.path.abspath(__file__)), "schemas")
+
+
def get_schema() -> dict:
"""Return jsonschema coalesced from all cc_* cloud-config modules."""
- schema_file = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), "cloud-init-schema.json"
- )
+ # Note versions.schema.json is publicly consumed by schemastore.org.
+ # If we change the location of versions.schema.json in github, we need
+ # to provide an updated PR to
+ # https://github.com/SchemaStore/schemastore.
+
+ # When bumping schema version due to incompatible changes:
+ # 1. Add a new schema-cloud-config-v#.json
+ # 2. change the USERDATA_SCHEMA_FILE to schema-cloud-config-v#.json
+ # 3. Add the new version definition to versions.schema.cloud-config.json
+ schema_file = os.path.join(get_schema_dir(), USERDATA_SCHEMA_FILE)
full_schema = None
try:
full_schema = json.loads(load_file(schema_file))
@@ -653,20 +695,6 @@ def get_schema() -> dict:
"$schema": "http://json-schema.org/draft-04/schema#",
"allOf": [],
}
-
- # TODO( Drop the get_modules loop when all legacy cc_* schema migrates )
- # Supplement base_schema with any legacy modules which still contain a
- # "schema" attribute. Legacy cc_* modules will be migrated to use the
- # store module schema in the composite cloud-init-schema-<version>.json
- # and will drop "schema" at that point.
- for (_, mod_name) in get_modules().items():
- # All cc_* modules need a "meta" attribute to represent schema defs
- (mod_locs, _) = importer.find_module(
- mod_name, ["cloudinit.config"], ["schema"]
- )
- if mod_locs:
- mod = importer.import_module(mod_locs[0])
- full_schema["allOf"].append(mod.schema)
return full_schema
diff --git a/cloudinit/config/schemas/__init__.py b/cloudinit/config/schemas/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/cloudinit/config/schemas/__init__.py
diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json
new file mode 100644
index 00000000..d409d5d6
--- /dev/null
+++ b/cloudinit/config/schemas/schema-cloud-config-v1.json
@@ -0,0 +1,2273 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "$defs": {
+ "users_groups.groups_by_groupname": {
+ "patternProperties": {
+ "^.+$": {
+ "label": "<group_name>",
+ "description": "Optional string of single username or a list of usernames to add to the group",
+ "type": ["string", "array"],
+ "items": {
+ "type": "string"
+ },
+ "minItems": 1
+ }
+ }
+ },
+ "users_groups.user": {
+ "oneOf": [
+ {"required": ["name"]},
+ {"required": ["snapuser"]}
+ ],
+ "properties": {
+ "name": {
+ "description": "The user's login name. Required otherwise user creation will be skipped for this user.",
+ "type": "string"
+ },
+ "expiredate": {
+ "default": null,
+ "description": "Optional. Date on which the user's account will be disabled. Default: ``null``",
+ "type": "string"
+ },
+ "gecos": {
+ "description": "Optional comment about the user, usually a comma-separated string of real name and contact information",
+ "type": "string"
+ },
+ "groups": {
+ "description": "Optional comma-separated string of groups to add the user to.",
+ "type": "string"
+ },
+ "homedir": {
+ "description": "Optional home dir for user. Default: ``/home/<username>``",
+ "default": "``/home/<username>``",
+ "type": "string"
+ },
+ "inactive": {
+ "description": "Optional string representing the number of days until the user is disabled. ",
+ "type": "string"
+ },
+ "lock_passwd": {
+ "default": true,
+ "description": "Disable password login. Default: ``true``",
+ "type": "boolean"
+ },
+ "no_create_home": {
+ "default": false,
+ "description": "Do not create home directory. Default: ``false``",
+ "type": "boolean"
+ },
+ "no_log_init": {
+ "default": false,
+ "description": "Do not initialize lastlog and faillog for user. Default: ``false``",
+ "type": "boolean"
+ },
+ "no_user_group": {
+ "default": false,
+ "description": "Do not create group named after user. Default: ``false``",
+ "type": "boolean"
+ },
+ "passwd": {
+ "description": "Hash of user password applied when user does not exist. To generate this hash, run: mkpasswd --method=SHA-512 --rounds=4096. **Note:** While hashed password is better than plain text, using ``passwd`` in user-data represents a security risk as user-data could be accessible by third-parties depending on your cloud platform.",
+ "type": "string"
+ },
+ "hashed_passwd": {
+ "description": "Hash of user password applied to new or existing users. To generate this hash, run: mkpasswd --method=SHA-512 --rounds=4096. **Note:** While ``hashed_password`` is better than ``plain_text_passwd``, using ``passwd`` in user-data represents a security risk as user-data could be accessible by third-parties depending on your cloud platform.",
+ "type": "string"
+ },
+ "plain_text_passwd": {
+ "description": "Clear text of user password applied to new or existing users. There are many more secure options than using plain text passwords, such as ``ssh_import_id`` or ``hashed_passwd``. Do not use this in production as user-data and your password can be exposed.",
+ "type": "string"
+ },
+ "create_groups": {
+ "default": true,
+ "description": "Boolean set ``false`` to disable creation of specified user ``groups``. Default: ``true``.",
+ "type": "boolean"
+ },
+ "primary_group": {
+ "default": "``<username>``",
+ "description": "Primary group for user. Default: ``<username>``",
+ "type": "string"
+ },
+ "selinux_user": {
+ "description": "SELinux user for user's login. Default to default SELinux user.",
+ "type": "string"
+ },
+ "shell": {
+ "description": "Path to the user's login shell. The default is to set no shell, which results in a system-specific default being used.",
+ "type": "string"
+ },
+ "snapuser": {
+ "description": " Specify an email address to create the user as a Snappy user through ``snap create-user``. If an Ubuntu SSO account is associated with the address, username and SSH keys will be requested from there.",
+ "type": "string"
+ },
+ "ssh_authorized_keys": {
+ "description": "List of SSH keys to add to user's authkeys file. Can not be combined with ``ssh_redirect_user``",
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1
+ },
+ "ssh_import_id": {
+ "description": "List of SSH IDs to import for user. Can not be combined with ``ssh_redirect_user``.",
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1
+ },
+ "ssh_redirect_user": {
+ "type": "boolean",
+ "default": false,
+ "description": "Boolean set to true to disable SSH logins for this user. When specified, all cloud meta-data public SSH keys will be set up in a disabled state for this username. Any SSH login as this username will timeout and prompt with a message to login instead as the ``default_username`` for this instance. Default: ``false``. This key can not be combined with ``ssh_import_id`` or ``ssh_authorized_keys``."
+ },
+ "system": {
+ "description": "Optional. Create user as system user with no home directory. Default: ``false``.",
+ "type": "boolean",
+ "default": false
+ },
+ "sudo": {
+ "type": ["boolean", "string"],
+ "description": "Sudo rule to use or false. Absence of a sudo value or ``false`` will result in no sudo rules added for this user. DEPRECATED: the value ``false`` will be deprecated in the future release. Use ``null`` or no ``sudo`` key instead."
+ },
+ "uid": {
+ "description": "The user's ID. Default is next available value.",
+ "type": "integer"
+ }
+ },
+ "additionalProperties": false
+ },
+ "apt_configure.mirror": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": ["arches"],
+ "properties": {
+ "arches": {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1
+ },
+ "uri": {"type": "string", "format": "uri"},
+ "search": {
+ "type": "array",
+ "items": {"type": "string", "format": "uri"},
+ "minItems": 1
+ },
+ "search_dns": {
+ "type": "boolean"
+ },
+ "keyid": {"type": "string"},
+ "key": {"type": "string"},
+ "keyserver": {"type": "string"}
+ }
+ },
+ "minItems": 1
+ },
+ "ca_certs.properties": {
+ "type": "object",
+ "properties": {
+ "remove-defaults": {
+ "description": "DEPRECATED. Use ``remove_defaults``. ",
+ "deprecated": true,
+ "type": "boolean",
+ "default": false
+ },
+ "remove_defaults": {
+ "description": "Remove default CA certificates if true. Default: false",
+ "type": "boolean",
+ "default": false
+ },
+ "trusted": {
+ "description": "List of trusted CA certificates to add.",
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1
+ }
+ },
+ "additionalProperties": false,
+ "minProperties": 1
+ },
+ "cc_apk_configure": {
+ "type": "object",
+ "properties": {
+ "apk_repos": {
+ "type": "object",
+ "properties": {
+ "preserve_repositories": {
+ "type": "boolean",
+ "default": false,
+ "description": "By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` based on any valid configuration settings specified within a apk_repos section of cloud config. To disable this behavior and preserve the repositories file from the pristine image, set ``preserve_repositories`` to ``true``.\n\n The ``preserve_repositories`` option overrides all other config keys that would alter ``/etc/apk/repositories``."
+ },
+ "alpine_repo": {
+ "type": ["object", "null"],
+ "properties": {
+ "base_url": {
+ "type": "string",
+ "default": "https://alpine.global.ssl.fastly.net/alpine",
+ "description": "The base URL of an Alpine repository, or mirror, to download official packages from. If not specified then it defaults to ``https://alpine.global.ssl.fastly.net/alpine``"
+ },
+ "community_enabled": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether to add the Community repo to the repositories file. By default the Community repo is not included."
+ },
+ "testing_enabled": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether to add the Testing repo to the repositories file. By default the Testing repo is not included. It is only recommended to use the Testing repo on a machine running the ``Edge`` version of Alpine as packages installed from Testing may have dependencies that conflict with those in non-Edge Main or Community repos."
+ },
+ "version": {
+ "type": "string",
+ "description": "The Alpine version to use (e.g. ``v3.12`` or ``edge``)"
+ }
+ },
+ "required": ["version"],
+ "minProperties": 1,
+ "additionalProperties": false
+ },
+ "local_repo_base_url": {
+ "type": "string",
+ "description": "The base URL of an Alpine repository containing unofficial packages"
+ }
+ },
+ "minProperties": 1,
+ "additionalProperties": false
+ }
+ }
+ },
+ "cc_apt_configure": {
+ "properties": {
+ "apt": {
+ "type": "object",
+ "additionalProperties": false,
+ "minProperties": 1,
+ "properties": {
+ "preserve_sources_list": {
+ "type": "boolean",
+ "default": false,
+ "description": "By default, cloud-init will generate a new sources list in ``/etc/apt/sources.list.d`` based on any changes specified in cloud config. To disable this behavior and preserve the sources list from the pristine image, set ``preserve_sources_list`` to ``true``.\n\nThe ``preserve_sources_list`` option overrides all other config keys that would alter ``sources.list`` or ``sources.list.d``, **except** for additional sources to be added to ``sources.list.d``."
+ },
+ "disable_suites": {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1,
+ "uniqueItems": true,
+ "description": "Entries in the sources list can be disabled using ``disable_suites``, which takes a list of suites to be disabled. If the string ``$RELEASE`` is present in a suite in the ``disable_suites`` list, it will be replaced with the release name. If a suite specified in ``disable_suites`` is not present in ``sources.list`` it will be ignored. For convenience, several aliases are provided for`` disable_suites``:\n\n - ``updates`` => ``$RELEASE-updates``\n - ``backports`` => ``$RELEASE-backports``\n - ``security`` => ``$RELEASE-security``\n - ``proposed`` => ``$RELEASE-proposed``\n - ``release`` => ``$RELEASE``.\n\nWhen a suite is disabled using ``disable_suites``, its entry in ``sources.list`` is not deleted; it is just commented out."
+ },
+ "primary": {
+ "$ref": "#/$defs/apt_configure.mirror",
+ "description": "The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys take a list of configs, allowing mirrors to be specified on a per-architecture basis. Each config is a dictionary which must have an entry for ``arches``, specifying which architectures that config entry is for. The keyword ``default`` applies to any architecture not explicitly listed. The mirror url can be specified with the ``uri`` key, or a list of mirrors to check can be provided in order, with the first mirror that can be resolved being selected. This allows the same configuration to be used in different environment, with different hosts used for a local APT mirror. If no mirror is provided by ``uri`` or ``search``, ``search_dns`` may be used to search for dns names in the format ``<distro>-mirror`` in each of the following:\n\n - fqdn of this host per cloud metadata,\n - localdomain,\n - domains listed in ``/etc/resolv.conf``.\n\nIf there is a dns entry for ``<distro>-mirror``, then it is assumed that there is a distro mirror at ``http://<distro>-mirror.<domain>/<distro>``. If the ``primary`` key is defined, but not the ``security`` key, then then configuration for ``primary`` is also used for ``security``. If ``search_dns`` is used for the ``security`` key, the search pattern will be ``<distro>-security-mirror``.\n\nEach mirror may also specify a key to import via any of the following optional keys:\n\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n\nIf no mirrors are specified, or all lookups fail, then default mirrors defined in the datasource are used. If none are present in the datasource either the following defaults are used:\n\n - ``primary`` => ``http://archive.ubuntu.com/ubuntu``.\n - ``security`` => ``http://security.ubuntu.com/ubuntu``"
+ },
+ "security": {
+ "$ref": "#/$defs/apt_configure.mirror",
+ "description": "Please refer to the primary config documentation"
+ },
+ "add_apt_repo_match": {
+ "type": "string",
+ "default": "^[\\w-]+:\\w",
+ "description": "All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults to ``^[\\w-]+:\\w``"
+ },
+ "debconf_selections": {
+ "type": "object",
+ "minProperties": 1,
+ "patternProperties": {
+ "^.+$": {
+ "type": "string"
+ }
+ },
+ "description": "Debconf additional configurations can be specified as a dictionary under the ``debconf_selections`` config key, with each key in the dict representing a different set of configurations. The value of each key must be a string containing all the debconf configurations that must be applied. We will bundle all of the values and pass them to ``debconf-set-selections``. Therefore, each value line must be a valid entry for ``debconf-set-selections``, meaning that they must possess for distinct fields:\n\n``pkgname question type answer``\n\nWhere:\n\n - ``pkgname`` is the name of the package.\n - ``question`` the name of the questions.\n - ``type`` is the type of question.\n - ``answer`` is the value used to answer the question.\n\nFor example: ``ippackage ippackage/ip string 127.0.01``"
+ },
+ "sources_list": {
+ "type": "string",
+ "description": "Specifies a custom template for rendering ``sources.list`` . If no ``sources_list`` template is given, cloud-init will use sane default. Within this template, the following strings will be replaced with the appropriate values:\n\n - ``$MIRROR``\n - ``$RELEASE``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$KEY_FILE``"
+ },
+ "conf": {
+ "type": "string",
+ "description": "Specify configuration for apt, such as proxy configuration. This configuration is specified as a string. For multiline APT configuration, make sure to follow yaml syntax."
+ },
+ "https_proxy": {
+ "type": "string",
+ "description": "More convenient way to specify https APT proxy. https proxy url is specified in the format ``https://[[user][:pass]@]host[:port]/``."
+ },
+ "http_proxy": {
+ "type": "string",
+ "description": "More convenient way to specify http APT proxy. http proxy url is specified in the format ``http://[[user][:pass]@]host[:port]/``."
+ },
+ "proxy": {
+ "type": "string",
+ "description": "Alias for defining a http APT proxy."
+ },
+ "ftp_proxy": {
+ "type": "string",
+ "description": "More convenient way to specify ftp APT proxy. ftp proxy url is specified in the format ``ftp://[[user][:pass]@]host[:port]/``."
+ },
+ "sources": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {
+ "type": "object",
+ "properties": {
+ "source": {
+ "type": "string"
+ },
+ "keyid": {
+ "type": "string"
+ },
+ "key": {
+ "type": "string"
+ },
+ "keyserver": {
+ "type": "string"
+ },
+ "filename": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false,
+ "minProperties": 1
+ }
+ },
+ "description": "Source list entries can be specified as a dictionary under the ``sources`` config key, with each key in the dict representing a different source file. The key of each source entry will be used as an id that can be referenced in other config entries, as well as the filename for the source's configuration under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``, it will be appended. If there is no configuration for a key in ``sources``, no file will be written, but the key may still be referred to as an id in other ``sources`` entries.\n\nEach entry under ``sources`` is a dictionary which may contain any of the following optional keys:\n - ``source``: a sources.list entry (some variable replacements apply).\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n - ``filename``: specify the name of the list file\n\nThe ``source`` key supports variable replacements for the following strings:\n\n - ``$MIRROR``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$RELEASE``\n - ``$KEY_FILE``"
+ }
+ }
+ }
+ }
+ },
+ "cc_apt_pipelining": {
+ "type": "object",
+ "properties": {
+ "apt_pipelining": {
+ "oneOf": [
+ {"type": "integer"},
+ {"type": "boolean"},
+ {"type": "string", "enum": ["none", "unchanged", "os"]}
+ ]
+ }
+ }
+ },
+ "cc_bootcmd": {
+ "type": "object",
+ "properties": {
+ "bootcmd": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "array", "items": {"type": "string"}},
+ {"type": "string"}
+ ]
+ },
+ "additionalItems": false,
+ "minItems": 1
+ }
+ }
+ },
+ "cc_byobu": {
+ "type": "object",
+ "properties": {
+ "byobu_by_default": {
+ "type": "string",
+ "enum": [
+ "enable-system",
+ "enable-user",
+ "disable-system",
+ "disable-user",
+ "enable",
+ "disable",
+ "user",
+ "system"
+ ]
+ }
+ }
+ },
+ "cc_ca_certs": {
+ "type": "object",
+ "properties": {
+ "ca_certs": {
+ "$ref": "#/$defs/ca_certs.properties"
+ },
+ "ca-certs": {
+ "$ref": "#/$defs/ca_certs.properties"
+ }
+ }
+ },
+ "cc_chef": {
+ "type": "object",
+ "properties": {
+ "chef": {
+ "type": "object",
+ "additionalProperties": false,
+ "minProperties": 1,
+ "properties": {
+ "directories": {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1,
+ "uniqueItems": true,
+ "description": "Create the necessary directories for chef to run. By default, it creates the following directories:\n\n - ``/etc/chef``\n - ``/var/log/chef``\n - ``/var/lib/chef``\n - ``/var/cache/chef``\n - ``/var/backups/chef``\n - ``/var/run/chef``"
+ },
+ "validation_cert": {
+ "type": "string",
+ "description": "Optional string to be written to file validation_key. Special value ``system`` means set use existing file."
+ },
+ "validation_key": {
+ "type": "string",
+ "default": "/etc/chef/validation.pem",
+ "description": "Optional path for validation_cert. default to ``/etc/chef/validation.pem``"
+ },
+ "firstboot_path": {
+ "type": "string",
+ "default": "/etc/chef/firstboot.json",
+ "description": "Path to write run_list and initial_attributes keys that should also be present in this configuration, defaults to ``/etc/chef/firstboot.json``"
+ },
+ "exec": {
+ "type": "boolean",
+ "default": false,
+ "description": "Set true if we should run or not run chef (defaults to false, unless a gem installed is requested where this will then default to true)."
+ },
+ "client_key": {
+ "type": "string",
+ "default": "/etc/chef/client.pem",
+ "description": "Optional path for client_cert. Default to ``/etc/chef/client.pem``."
+ },
+ "encrypted_data_bag_secret": {
+ "type": "string",
+ "default": null,
+ "description": "Specifies the location of the secret key used by chef to encrypt data items. By default, this path is set to null, meaning that chef will have to look at the path ``/etc/chef/encrypted_data_bag_secret`` for it."
+ },
+ "environment": {
+ "type": "string",
+ "default": "_default",
+ "description": "Specifies which environment chef will use. By default, it will use the ``_default`` configuration."
+ },
+ "file_backup_path": {
+ "type": "string",
+ "default": "/var/backups/chef",
+ "description": "Specifies the location in which backup files are stored. By default, it uses the ``/var/backups/chef`` location."
+ },
+ "file_cache_path": {
+ "type": "string",
+ "default": "/var/cache/chef",
+ "description": "Specifies the location in which chef cache files will be saved. By default, it uses the ``/var/cache/chef`` location."
+ },
+ "json_attribs": {
+ "type": "string",
+ "default": "/etc/chef/firstboot.json",
+ "description": "Specifies the location in which some chef json data is stored. By default, it uses the ``/etc/chef/firstboot.json`` location."
+ },
+ "log_level": {
+ "type": "string",
+ "default": ":info",
+ "description": "Defines the level of logging to be stored in the log file. By default this value is set to ``:info``."
+ },
+ "log_location": {
+ "type": "string",
+ "default": "/var/log/chef/client.log",
+ "description": "Specifies the location of the chef lof file. By default, the location is specified at ``/var/log/chef/client.log``."
+ },
+ "node_name": {
+ "type": "string",
+ "description": "The name of the node to run. By default, we will use th instance id as the node name."
+ },
+ "omnibus_url": {
+ "type": "string",
+ "default": "https://www.chef.io/chef/install.sh",
+ "description": "Omnibus URL if chef should be installed through Omnibus. By default, it uses the ``https://www.chef.io/chef/install.sh``."
+ },
+ "omnibus_url_retries": {
+ "type": "integer",
+ "default": 5,
+ "description": "The number of retries that will be attempted to reach the Omnibus URL. Default is 5."
+ },
+ "omnibus_version": {
+ "type": "string",
+ "description": "Optional version string to require for omnibus install."
+ },
+ "pid_file": {
+ "type": "string",
+ "default": "/var/run/chef/client.pid",
+ "description": "The location in which a process identification number (pid) is saved. By default, it saves in the ``/var/run/chef/client.pid`` location."
+ },
+ "server_url": {
+ "type": "string",
+ "description": "The URL for the chef server"
+ },
+ "show_time": {
+ "type": "boolean",
+ "default": true,
+ "description": "Show time in chef logs"
+ },
+ "ssl_verify_mode": {
+ "type": "string",
+ "default": ":verify_none",
+ "description": "Set the verify mode for HTTPS requests. We can have two possible values for this parameter:\n\n - ``:verify_none``: No validation of SSL certificates.\n - ``:verify_peer``: Validate all SSL certificates.\n\nBy default, the parameter is set as ``:verify_none``."
+ },
+ "validation_name": {
+ "type": "string",
+ "description": "The name of the chef-validator key that Chef Infra Client uses to access the Chef Infra Server during the initial Chef Infra Client run."
+ },
+ "force_install": {
+ "type": "boolean",
+ "default": false,
+ "description": "If set to ``true``, forces chef installation, even if it is already installed."
+ },
+ "initial_attributes": {
+ "type": "object",
+ "items": {"type": "string"},
+ "description": "Specify a list of initial attributes used by the cookbooks."
+ },
+ "install_type": {
+ "type": "string",
+ "default": "packages",
+ "enum": [
+ "packages",
+ "gems",
+ "omnibus"
+ ],
+ "description": "The type of installation for chef. It can be one of the following values:\n\n - ``packages``\n - ``gems``\n - ``omnibus``"
+ },
+ "run_list": {
+ "type": "array",
+ "items": {"type": "string"},
+ "description": "A run list for a first boot json."
+ },
+ "chef_license": {
+ "type": "string",
+ "description": "string that indicates if user accepts or not license related to some of chef products"
+ }
+ }
+ }
+ }
+ },
+ "cc_debug": {
+ "type": "object",
+ "properties": {
+ "debug": {
+ "additionalProperties": false,
+ "minProperties": 1,
+ "type": "object",
+ "properties": {
+ "verbose": {
+ "description": "Should always be true for this module",
+ "type": "boolean"
+ },
+ "output": {
+ "description": "Location to write output. Defaults to console + log",
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "cc_disable_ec2_metadata": {
+ "type": "object",
+ "properties": {
+ "disable_ec2_metadata": {
+ "default": false,
+ "description": "Set true to disable IPv4 routes to EC2 metadata. Default: false.",
+ "type": "boolean"
+ }
+ }
+ },
+ "cc_disk_setup": {
+ "type": "object",
+ "properties": {
+ "device_aliases": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {
+ "label": "<alias_name>",
+ "type": "string",
+ "description": "Path to disk to be aliased by this name."
+ }
+ }
+ },
+ "disk_setup": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {
+ "label": "<alias name/path>",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "table_type": {
+ "type": "string",
+ "default": "mbr",
+ "enum": ["mbr", "gpt"],
+ "description": "Specifies the partition table type, either ``mbr`` or ``gpt``. Default: ``mbr``."
+ },
+ "layout": {
+ "type": ["string", "boolean", "array"],
+ "default": false,
+ "oneOf": [
+ {"type": "string", "enum": ["remove"]},
+ {"type": "boolean"},
+ {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type": "array",
+ "items": {"type": "integer"},
+ "minItems": 2,
+ "maxItems": 2
+ }
+ ]
+ }
+ }
+ ],
+ "description": "If set to ``true``, a single partition using all the space on the device will be created. If set to ``false``, no partitions will be created. If set to ``remove``, any existing partition table will be purged. Partitions can be specified by providing a list to ``layout``, where each entry in the list is either a size or a list containing a size and the numerical value for a partition type. The size for partitions is specified in **percentage** of disk space, not in bytes (e.g. a size of 33 would take up 1/3 of the disk space). Default: ``false``."
+ },
+ "overwrite": {
+ "type": "boolean",
+ "default": false,
+ "description": "Controls whether this module tries to be safe about writing partition tables or not. If ``overwrite: false`` is set, the device will be checked for a partition table and for a file system and if either is found, the operation will be skipped. If ``overwrite: true`` is set, no checks will be performed. Using ``overwrite: true`` is **dangerous** and can lead to data loss, so double check that the correct device has been specified if using this option. Default: ``false``"
+ }
+ }
+ }
+ }
+ },
+ "fs_setup": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "label": {
+ "type": "string",
+ "description": "Label for the filesystem."
+ },
+ "filesystem": {
+ "type": "string",
+ "description": "Filesystem type to create. E.g., ``ext4`` or ``btrfs``"
+ },
+ "device": {
+ "type": "string",
+ "description": "Specified either as a path or as an alias in the format ``<alias name>.<y>`` where ``<y>`` denotes the partition number on the device. If specifying device using the ``<device name>.<partition number>`` format, the value of ``partition`` will be overwritten."
+ },
+ "partition": {
+ "type": ["string", "integer"],
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": ["auto", "any", "none"]
+ }
+ ],
+ "description": "The partition can be specified by setting ``partition`` to the desired partition number. The ``partition`` option may also be set to ``auto``, in which this module will search for the existence of a filesystem matching the ``label``, ``type`` and ``device`` of the ``fs_setup`` entry and will skip creating the filesystem if one is found. The ``partition`` option may also be set to ``any``, in which case any file system that matches ``type`` and ``device`` will cause this module to skip filesystem creation for the ``fs_setup`` entry, regardless of ``label`` matching or not. To write a filesystem directly to a device, use ``partition: none``. ``partition: none`` will **always** write the filesystem, even when the ``label`` and ``filesystem`` are matched, and ``overwrite`` is ``false``."
+ },
+ "overwrite": {
+ "type": "boolean",
+ "description": "If ``true``, overwrite any existing filesystem. Using ``overwrite: true`` for filesystems is **dangerous** and can lead to data loss, so double check the entry in ``fs_setup``. Default: ``false``"
+ },
+ "replace_fs": {
+ "type": "string",
+ "description": "Ignored unless ``partition`` is ``auto`` or ``any``. Default ``false``."
+ },
+ "extra_opts": {
+ "type": ["array", "string"],
+ "items": {"type": "string"},
+ "description": "Optional options to pass to the filesystem creation command. Ignored if you using ``cmd`` directly."
+ },
+ "cmd": {
+ "type": ["array", "string"],
+ "items": {"type": "string"},
+ "description": "Optional command to run to create the filesystem. Can include string substitutions of the other ``fs_setup`` config keys. This is only necessary if you need to override the default command."
+ }
+ }
+ }
+ }
+ }
+ },
+ "cc_fan": {
+ "type": "object",
+ "properties": {
+ "fan": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": ["config"],
+ "properties": {
+ "config": {
+ "type": "string",
+ "description": "The fan configuration to use as a single multi-line string"
+ },
+ "config_path": {
+ "type": "string",
+ "default": "/etc/network/fan",
+ "description": "The path to write the fan configuration to. Default: ``/etc/network/fan``"
+ }
+ }
+ }
+ }
+ },
+ "cc_final_message": {
+ "type": "object",
+ "properties": {
+ "final_message": {
+ "type": "string",
+ "description": "The message to display at the end of the run"
+ }
+ }
+ },
+ "cc_growpart": {
+ "type": "object",
+ "properties": {
+ "growpart": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "mode": {
+ "enum": [false, "auto", "growpart", "gpart", "off"],
+ "default": "auto",
+ "description": "The utility to use for resizing. Default: ``auto``\n\nPossible options:\n\n* ``auto`` - Use any available utility\n\n* ``growpart`` - Use growpart utility\n\n* ``gpart`` - Use BSD gpart utility\n\n* ``off`` - Take no action\n\nSpecifying a boolean ``false`` value for this key is deprecated. Use ``off`` instead."
+ },
+ "devices": {
+ "type": "array",
+ "default": ["/"],
+ "items": {
+ "type": "string"
+ },
+ "description": "The devices to resize. Each entry can either be the path to the device's mountpoint in the filesystem or a path to the block device in '/dev'. Default: ``[/]``"
+ },
+ "ignore_growroot_disabled": {
+ "type": "boolean",
+ "default": false,
+ "description": "If ``true``, ignore the presence of ``/etc/growroot-disabled``. If ``false`` and the file exists, then don't resize. Default: ``false``"
+ }
+ }
+ }
+ }
+ },
+ "cc_grub_dpkg": {
+ "type": "object",
+ "properties": {
+ "grub_dpkg": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "default": true,
+ "description": "Whether to configure which device is used as the target for grub installation. Default: ``true``"
+ },
+ "grub-pc/install_devices": {
+ "type": "string",
+ "description": "Device to use as target for grub installation. If unspecified, ``grub-probe`` of ``/boot`` will be used to find the device"
+ },
+ "grub-pc/install_devices_empty": {
+ "type": ["string", "boolean"],
+ "description": "Sets values for ``grub-pc/install_devices_empty``. If unspecified, will be set to ``true`` if ``grub-pc/install_devices`` is empty, otherwise ``false``. Using a non-boolean value for this field is deprecated."
+ }
+ }
+ },
+ "grub-dpkg": {
+ "type": "object",
+ "description": "DEPRECATED: Use ``grub_dpkg`` instead"
+ }
+ }
+ },
+ "cc_install_hotplug": {
+ "type": "object",
+ "properties": {
+ "updates": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "network": {
+ "type": "object",
+ "required": ["when"],
+ "additionalProperties": false,
+ "properties": {
+ "when": {
+ "type": "array",
+ "additionalProperties": false,
+ "items": {
+ "type": "string",
+ "additionalProperties": false,
+ "enum": [
+ "boot-new-instance",
+ "boot-legacy",
+ "boot",
+ "hotplug"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "cc_keyboard": {
+ "type": "object",
+ "properties": {
+ "keyboard": {
+ "type": "object",
+ "properties": {
+ "layout": {
+ "type": "string",
+ "description": "Required. Keyboard layout. Corresponds to XKBLAYOUT."
+ },
+ "model": {
+ "type": "string",
+ "default": "pc105",
+ "description": "Optional. Keyboard model. Corresponds to XKBMODEL. Default: ``pc105``."
+ },
+ "variant": {
+ "type": "string",
+ "description": "Optional. Keyboard variant. Corresponds to XKBVARIANT."
+ },
+ "options": {
+ "type": "string",
+ "description": "Optional. Keyboard options. Corresponds to XKBOPTIONS."
+ }
+ },
+ "required": ["layout"],
+ "additionalProperties": false
+ }
+ }
+ },
+ "cc_keys_to_console": {
+ "type": "object",
+ "properties": {
+ "ssh": {
+ "type": "object",
+ "properties": {
+ "emit_keys_to_console": {
+ "type": "boolean",
+ "default": true,
+ "description": "Set false to avoid printing SSH keys to system console. Default: ``true``."
+ }
+ },
+ "additionalProperties": false,
+ "required": ["emit_keys_to_console"]
+ },
+ "ssh_key_console_blacklist": {
+ "type": "array",
+ "default": ["ssh-dss"],
+ "description": "Avoid printing matching SSH key types to the system console.",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+ "ssh_fp_console_blacklist": {
+ "type": "array",
+ "description": "Avoid printing matching SSH fingerprints to the system console.",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ }
+ }
+ },
+ "cc_landscape": {
+ "type": "object",
+ "properties": {
+ "landscape": {
+ "type": "object",
+ "required": ["client"],
+ "properties": {
+ "client": {
+ "type": "object",
+ "properties": {
+ "url": {
+ "type": "string",
+ "default": "https://landscape.canonical.com/message-system",
+ "description": "The Landscape server URL to connect to. Default: ``https://landscape.canonical.com/message-system``."
+ },
+ "ping_url": {
+ "type": "string",
+ "default": "https://landscape.canonical.com/ping",
+ "description": "The URL to perform lightweight exchange initiation with. Default: ``https://landscape.canonical.com/ping``."
+ },
+ "data_path": {
+ "type": "string",
+ "default": "/var/lib/landscape/client",
+ "description": "The directory to store data files in. Default: ``/var/lib/land‐scape/client/``."
+ },
+ "log_level": {
+ "type": "string",
+ "default": "info",
+ "enum": ["debug", "info", "warning", "error", "critical"],
+ "description": "The log level for the client. Default: ``info``."
+ },
+ "computer_tite": {
+ "type": "string",
+ "description": "The title of this computer."
+ },
+ "account_name": {
+ "type": "string",
+ "description": "The account this computer belongs to."
+ },
+ "registration_key": {
+ "type": "string",
+ "description": "The account-wide key used for registering clients."
+ },
+ "tags": {
+ "type": "string",
+ "pattern": "^[-_0-9a-zA-Z]+(,[-_0-9a-zA-Z]+)*$",
+ "description": "Comma separated list of tag names to be sent to the server."
+ },
+ "http_proxy": {
+ "type": "string",
+ "description": "The URL of the HTTP proxy, if one is needed."
+ },
+ "https_proxy": {
+ "type": "string",
+ "description": "The URL of the HTTPS proxy, if one is needed."
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "cc_locale": {
+ "properties": {
+ "locale": {
+ "type": "string",
+ "description": "The locale to set as the system's locale (e.g. ar_PS)"
+ },
+ "locale_configfile": {
+ "type": "string",
+ "description": "The file in which to write the locale configuration (defaults to the distro's default location)"
+ }
+ }
+ },
+ "cc_lxd": {
+ "type": "object",
+ "properties": {
+ "lxd": {
+ "type": "object",
+ "minProperties": 1,
+ "properties": {
+ "init": {
+ "type": "object",
+ "properties": {
+ "network_address": {
+ "type": "string",
+ "description": "IP address for LXD to listen on"
+ },
+ "network_port": {
+ "type": "integer",
+ "description": "Network port to bind LXD to."
+ },
+ "storage_backend": {
+ "type": "string",
+ "enum": ["zfs", "dir"],
+ "default": "dir",
+ "description": "Storage backend to use. Default: ``dir``."
+ },
+ "storage_create_device": {
+ "type": "string",
+ "description": "Setup device based storage using DEVICE"
+ },
+ "storage_create_loop": {
+ "type": "integer",
+ "description": "Setup loop based storage with SIZE in GB"
+ },
+ "storage_pool": {
+ "type": "string",
+ "description": "Name of storage pool to use or create"
+ },
+ "trust_password": {
+ "type": "string",
+ "description": "The password required to add new clients"
+ }
+ }
+ },
+ "bridge": {
+ "type": "object",
+ "required": ["mode"],
+ "properties": {
+ "mode": {
+ "type": "string",
+ "description": "Whether to setup LXD bridge, use an existing bridge by ``name`` or create a new bridge. `none` will avoid bridge setup, `existing` will configure lxd to use the bring matching ``name`` and `new` will create a new bridge.",
+ "enum": ["none", "existing", "new"]
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the LXD network bridge to attach or create. Default: ``lxdbr0``.",
+ "default": "lxdbr0"
+ },
+ "ipv4_address": {
+ "type": "string",
+ "description": "IPv4 address for the bridge. If set, ``ipv4_netmask`` key required."
+ },
+ "ipv4_netmask": {
+ "type": "integer",
+ "description": "Prefix length for the ``ipv4_address`` key. Required when ``ipv4_address`` is set."
+ },
+ "ipv4_dhcp_first": {
+ "type": "string",
+ "description": "First IPv4 address of the DHCP range for the network created. This value will combined with ``ipv4_dhcp_last`` key to set LXC ``ipv4.dhcp.ranges``."
+ },
+ "ipv4_dhcp_last": {
+ "type": "string",
+ "description": "Last IPv4 address of the DHCP range for the network created. This value will combined with ``ipv4_dhcp_first`` key to set LXC ``ipv4.dhcp.ranges``."
+ },
+ "ipv4_dhcp_leases": {
+ "type": "integer",
+ "description": "Number of DHCP leases to allocate within the range. Automatically calculated based on `ipv4_dhcp_first` and `ipv4_dchp_last` when unset."
+ },
+ "ipv4_nat": {
+ "type": "boolean",
+ "default": false,
+ "description": "Set ``true`` to NAT the IPv4 traffic allowing for a routed IPv4 network. Default: ``false``."
+ },
+ "ipv6_address": {
+ "type": "string",
+ "description": "IPv6 address for the bridge (CIDR notation). When set, ``ipv6_netmask`` key is required. When absent, no IPv6 will be configured."
+ },
+ "ipv6_netmask": {
+ "type": "integer",
+ "description": "Prefix length for ``ipv6_address`` provided. Required when ``ipv6_address`` is set."
+ },
+ "ipv6_nat": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether to NAT. Default: ``false``."
+ },
+ "domain": {
+ "type": "string",
+ "description": "Domain to advertise to DHCP clients and use for DNS resolution."
+ }
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "cc_mcollective": {
+ "type": "object",
+ "properties": {
+ "mcollective": {
+ "type": "object",
+ "properties": {
+ "conf": {
+ "type": "object",
+ "properties": {
+ "public-cert": {
+ "type": "string",
+ "description": "Optional value of server public certificate which will be written to ``/etc/mcollective/ssl/server-public.pem``"
+ },
+ "private-cert": {
+ "type": "string",
+ "description": "Optional value of server private certificate which will be written to ``/etc/mcollective/ssl/server-private.pem``"
+ }
+ },
+ "patternProperties": {
+ "^.+$": {
+ "description": "Optional config key: value pairs which will be appended to ``/etc/mcollective/server.cfg``.",
+ "oneOf": [
+ {"type": "boolean"},
+ {"type": "integer"},
+ {"type": "string"}
+ ]
+ }
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "cc_migrator": {
+ "type": "object",
+ "properties": {
+ "migrate": {
+ "type": "boolean",
+ "default": true,
+ "description": "Whether to migrate legacy cloud-init semaphores to new format. Default: ``true``"
+ }
+ }
+ },
+ "cc_mounts": {
+ "type": "object",
+ "properties": {
+ "mounts": {
+ "type": "array",
+ "items": {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1,
+ "maxItems": 6
+ },
+ "description": "List of lists. Each inner list entry is a list of ``/etc/fstab`` mount declarations of the format: [ fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno ]. A mount declaration with less than 6 items will get remaining values from ``mount_default_fields``. A mount declaration with only `fs_spec` and no `fs_file` mountpoint will be skipped.",
+ "minItems": 1
+ },
+ "mount_default_fields": {
+ "type": "array",
+ "description": "Default mount configuration for any mount entry with less than 6 options provided. When specified, 6 items are required and represent ``/etc/fstab`` entries. Default: ``defaults,nofail,x-systemd.requires=cloud-init.service,_netdev``",
+ "default": [null, null, "auto", "defaults,nofail,x-systemd.requires=cloud-init.service", "0", "2"],
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "null"}
+ ]
+ },
+ "minItems": 6,
+ "maxItems": 6
+ },
+ "swap": {
+ "type": "object",
+ "properties": {
+ "filename": {
+ "type": "string",
+ "description": "Path to the swap file to create"
+ },
+ "size": {
+ "description": "The size in bytes of the swap file, 'auto' or a human-readable size abbreviation of the format <float_size><units> where units are one of B, K, M, G or T.",
+ "oneOf": [
+ {"enum": ["auto"]},
+ {"type": "integer"},
+ {"type": "string", "pattern": "^([0-9]+)?\\.?[0-9]+[BKMGT]$"}
+ ]
+ },
+ "maxsize": {
+ "oneOf": [
+ {"type": "integer"},
+ {"type": "string", "pattern": "^([0-9]+)?\\.?[0-9]+[BKMGT]$"}
+ ],
+ "description": "The maxsize in bytes of the swap file"
+ }
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "cc_ntp": {
+ "type": "object",
+ "properties": {
+ "ntp": {
+ "type": ["null", "object"],
+ "properties": {
+ "pools": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "format": "hostname"
+ },
+ "uniqueItems": true,
+ "description": "List of ntp pools. If both pools and servers are\nempty, 4 default pool servers will be provided of\nthe format ``{0-3}.{distro}.pool.ntp.org``. NOTE:\nfor Alpine Linux when using the Busybox NTP client\nthis setting will be ignored due to the limited\nfunctionality of Busybox's ntpd."
+ },
+ "servers": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "format": "hostname"
+ },
+ "uniqueItems": true,
+ "description": "List of ntp servers. If both pools and servers are\nempty, 4 default pool servers will be provided with\nthe format ``{0-3}.{distro}.pool.ntp.org``."
+ },
+ "ntp_client": {
+ "type": "string",
+ "default": "auto",
+ "description": "Name of an NTP client to use to configure system NTP.\nWhen unprovided or 'auto' the default client preferred\nby the distribution will be used. The following\nbuilt-in client names can be used to override existing\nconfiguration defaults: chrony, ntp, ntpdate,\nsystemd-timesyncd."
+ },
+ "enabled": {
+ "type": "boolean",
+ "default": true,
+ "description": "Attempt to enable ntp clients if set to True. If set\nto False, ntp client will not be configured or\ninstalled"
+ },
+ "config": {
+ "description": "Configuration settings or overrides for the\n``ntp_client`` specified.",
+ "type": "object",
+ "properties": {
+ "confpath": {
+ "type": "string",
+ "description": "The path to where the ``ntp_client``\nconfiguration is written."
+ },
+ "check_exe": {
+ "type": "string",
+ "description": "The executable name for the ``ntp_client``.\nFor example, ntp service ``check_exe`` is\n'ntpd' because it runs the ntpd binary."
+ },
+ "packages": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true,
+ "description": "List of packages needed to be installed for the\nselected ``ntp_client``."
+ },
+ "service_name": {
+ "type": "string",
+ "description": "The systemd or sysvinit service name used to\nstart and stop the ``ntp_client``\nservice."
+ },
+ "template": {
+ "type": "string",
+ "description": "Inline template allowing users to define their\nown ``ntp_client`` configuration template.\nThe value must start with '## template:jinja'\nto enable use of templating support.\n"
+ }
+ },
+ "minProperties": 1,
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "cc_package_update_upgrade_install": {
+ "type": "object",
+ "properties": {
+ "packages": {
+ "type": "array",
+ "description": "A list of packages to install. Each entry in the list can be either a package name or a list with two entries, the first being the package name and the second being the specific package version to install.",
+ "items": {
+ "oneOf": [
+ {"type": "array", "items": {"type": "string"}, "minItems": 2, "maxItems": 2},
+ {"type": "string"}
+ ]
+ },
+ "minItems": 1
+ },
+ "package_update": {
+ "type": "boolean",
+ "default": false,
+ "description": "Set ``true`` to update packages. Happens before upgrade or install. Default: ``false``"
+ },
+ "package_upgrade": {
+ "type": "boolean",
+ "default": false,
+ "description": "Set ``true`` to upgrade packages. Happens before install. Default: ``false``"
+ },
+ "package_reboot_if_required": {
+ "type": "boolean",
+ "default": false,
+ "description": "Set ``true`` to reboot the system if required by presence of `/var/run/reboot-required`. Default: ``false``"
+ },
+ "apt_update": {
+ "type": "boolean",
+ "default": false,
+ "description": "DEPRECATED. Use ``package_update``. Default: ``false``",
+ "deprecated": true
+ },
+ "apt_upgrade": {
+ "type": "boolean",
+ "default": false,
+ "description": "DEPRECATED. Use ``package_upgrade``. Default: ``false``",
+ "deprecated": true
+ },
+ "apt_reboot_if_required": {
+ "type": "boolean",
+ "default": false,
+ "description": "DEPRECATED. Use ``package_reboot_if_required``. Default: ``false``",
+ "deprecated": true
+ }
+ }
+ },
+ "cc_phone_home": {
+ "type": "object",
+ "properties": {
+ "phone_home": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": ["url"],
+ "properties": {
+ "url": {
+ "type": "string",
+ "format": "uri",
+ "description": "The URL to send the phone home data to."
+ },
+ "post": {
+ "description": "A list of keys to post or ``all``. Default: ``all``",
+ "oneOf": [
+ {"enum": ["all"]},
+ {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "pub_key_dsa",
+ "pub_key_rsa",
+ "pub_key_ecdsa",
+ "pub_key_ed25519",
+ "instance_id",
+ "hostname",
+ "fqdn"
+ ]
+ }
+ }
+ ]
+ },
+ "tries": {
+ "type": "integer",
+ "description": "The number of times to try sending the phone home data. Default: ``10``",
+ "default": 10
+ }
+ }
+ }
+ }
+ },
+ "cc_power_state_change": {
+ "type": "object",
+ "properties": {
+ "power_state": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": ["mode"],
+ "properties": {
+ "delay": {
+ "description": "Time in minutes to delay after cloud-init has finished. Can be ``now`` or an integer specifying the number of minutes to delay. Default: ``now``",
+ "default": "now",
+ "oneOf": [
+ {"type": "integer", "minimum": 0},
+ {"type": "string", "pattern": "^\\+?[0-9]+$"},
+ {"enum": ["now"]}
+ ]
+ },
+ "mode": {
+ "description": "Must be one of ``poweroff``, ``halt``, or ``reboot``.",
+ "type": "string",
+ "enum": ["poweroff", "reboot", "halt"]
+ },
+ "message": {
+ "description": "Optional message to display to the user when the system is powering off or rebooting.",
+ "type": "string"
+ },
+ "timeout": {
+ "description": "Time in seconds to wait for the cloud-init process to finish before executing shutdown. Default: ``30``",
+ "type": "integer",
+ "default": 30
+ },
+ "condition": {
+ "description": "Apply state change only if condition is met. May be boolean true (always met), false (never met), or a command string or list to be executed. For command formatting, see the documentation for ``cc_runcmd``. If exit code is 0, condition is met, otherwise not. Default: ``true``",
+ "default": true,
+ "oneOf": [
+ {"type": "string"},
+ {"type": "boolean"},
+ {"type": "array"}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "cc_puppet": {
+ "type": "object",
+ "properties": {
+ "puppet": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "install": {
+ "type": "boolean",
+ "default": true,
+ "description": "Whether or not to install puppet. Setting to ``false`` will result in an error if puppet is not already present on the system. Default: ``true``"
+ },
+ "version": {
+ "type": "string",
+ "description": "Optional version to pass to the installer script or package manager. If unset, the latest version from the repos will be installed."
+ },
+ "install_type": {
+ "type": "string",
+ "description": "Valid values are ``packages`` and ``aio``. Agent packages from the puppetlabs repositories can be installed by setting ``aio``. Based on this setting, the default config/SSL/CSR paths will be adjusted accordingly. Default: ``packages``",
+ "enum": ["packages", "aio"],
+ "default": "packages"
+ },
+ "collection": {
+ "type": "string",
+ "description": "Puppet collection to install if ``install_type`` is ``aio``. This can be set to one of ``puppet`` (rolling release), ``puppet6``, ``puppet7`` (or their nightly counterparts) in order to install specific release streams."
+ },
+ "aio_install_url": {
+ "type": "string",
+ "description": "If ``install_type`` is ``aio``, change the url of the install script."
+ },
+ "cleanup": {
+ "type": "boolean",
+ "default": true,
+ "description": "Whether to remove the puppetlabs repo after installation if ``install_type`` is ``aio``. Default: ``true``"
+ },
+ "conf_file": {
+ "type": "string",
+ "description": "The path to the puppet config file. Default depends on ``install_type``"
+ },
+ "ssl_dir": {
+ "type": "string",
+ "description": "The path to the puppet SSL directory. Default depends on ``install_type``"
+ },
+ "csr_attributes_path": {
+ "type": "string",
+ "description": "The path to the puppet csr attributes file. Default depends on ``install_type``"
+ },
+ "package_name": {
+ "type": "string",
+ "description": "Name of the package to install if ``install_type`` is ``packages``. Default: ``puppet``"
+ },
+ "exec": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether or not to run puppet after configuration finishes. A single manual run can be triggered by setting ``exec`` to ``true``, and additional arguments can be passed to ``puppet agent`` via the ``exec_args`` key (by default the agent will execute with the ``--test`` flag). Default: ``false``"
+ },
+ "exec_args": {
+ "type": "array",
+ "description": "A list of arguments to pass to 'puppet agent' if 'exec' is true Default: ``['--test']``",
+ "items": {
+ "type": "string"
+ }
+ },
+ "start_service": {
+ "type": "boolean",
+ "default": true,
+ "description": "By default, the puppet service will be automatically enabled after installation and set to automatically start on boot. To override this in favor of manual puppet execution set ``start_service`` to ``false``"
+ },
+ "conf": {
+ "type": "object",
+ "description": "Every key present in the conf object will be added to puppet.conf. As such, section names should be one of: ``main``, ``server``, ``agent`` or ``user`` and keys should be valid puppet configuration options. The configuration is specified as a dictionary containing high-level ``<section>`` keys and lists of ``<key>=<value>`` pairs within each section. The ``certname`` key supports string substitutions for ``%i`` and ``%f``, corresponding to the instance id and fqdn of the machine respectively.\n\n``ca_cert`` is a special case. It won't be added to puppet.conf. It holds the puppetserver certificate in pem format. It should be a multi-line string (using the | yaml notation for multi-line strings).",
+ "additionalProperties": false,
+ "properties": {
+ "main": {
+ "type": "object"
+ },
+ "server": {
+ "type": "object"
+ },
+ "agent": {
+ "type": "object"
+ },
+ "user": {
+ "type": "object"
+ },
+ "ca_cert": {
+ "type": "string"
+ }
+ }
+ },
+ "csr_attributes": {
+ "type": "object",
+ "description": "create a ``csr_attributes.yaml`` file for CSR attributes and certificate extension requests. See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html",
+ "additionalProperties": false,
+ "properties": {
+ "custom_attributes": {
+ "type": "object"
+ },
+ "extension_requests": {
+ "type": "object"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "cc_resizefs": {
+ "type": "object",
+ "properties": {
+ "resize_rootfs": {
+ "enum": [true, false, "noblock"],
+ "description": "Whether to resize the root partition. ``noblock`` will resize in the background. Default: ``true``"
+ }
+ }
+ },
+ "cc_resolv_conf": {
+ "type": "object",
+ "properties": {
+ "manage_resolv_conf": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether to manage the resolv.conf file. ``resolv_conf`` block will be ignored unless this is set to ``true``. Default: ``false``"
+ },
+ "resolv_conf": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "nameservers": {
+ "type": "array",
+ "description": "A list of nameservers to use to be added as ``nameserver`` lines"
+ },
+ "searchdomains": {
+ "type": "array",
+ "description": "A list of domains to be added to the ``search`` line"
+ },
+ "domain": {
+ "type": "string",
+ "description": "The domain to be added as ``domain`` line"
+ },
+ "sortlist": {
+ "type": "array",
+ "description": "A list of IP addresses to be added to ``sortlist`` line"
+ },
+ "options": {
+ "type": "object",
+ "description": "Key/value pairs of options to go under ``options`` heading. A unary option should be specified as ``true``"
+ }
+ }
+ }
+ }
+ },
+ "cc_rh_subscription": {
+ "type": "object",
+ "properties": {
+ "rh_subscription": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "username": {
+ "type": "string",
+ "description": "The username to use. Must be used with password. Should not be used with ``activation-key`` or ``org``"
+ },
+ "password": {
+ "type": "string",
+ "description": "The password to use. Must be used with username. Should not be used with ``activation-key`` or ``org``"
+ },
+ "activation-key": {
+ "type": "string",
+ "description": "The activation key to use. Must be used with ``org``. Should not be used with ``username`` or ``password``"
+ },
+ "org": {
+ "type": "integer",
+ "description": "The organization number to use. Must be used with ``activation-key``. Should not be used with ``username`` or ``password``"
+ },
+ "auto-attach": {
+ "type": "boolean",
+ "description": "Whether to attach subscriptions automatically"
+ },
+ "service-level": {
+ "type": "string",
+ "description": "The service level to use when subscribing to RH repositories. ``auto-attach`` must be true for this to be used"
+ },
+ "add-pool": {
+ "type": "array",
+ "description": "A list of pool IDs to add to the subscription",
+ "items": {
+ "type": "string"
+ }
+ },
+ "enable-repo": {
+ "type": "array",
+ "description": "A list of repositories to enable",
+ "items": {
+ "type": "string"
+ }
+ },
+ "disable-repo": {
+ "type": "array",
+ "description": "A list of repositories to disable",
+ "items": {
+ "type": "string"
+ }
+ },
+ "rhsm-baseurl": {
+ "type": "string",
+ "description": "Sets the baseurl in ``/etc/rhsm/rhsm.conf``"
+ },
+ "server-hostname": {
+ "type": "string",
+ "description": "Sets the serverurl in ``/etc/rhsm/rhsm.conf``"
+ }
+ }
+ }
+ }
+ },
+ "cc_rsyslog": {
+ "type": "object",
+ "properties": {
+ "rsyslog": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "config_dir": {
+ "type": "string",
+ "description": "The directory where rsyslog configuration files will be written. Default: ``/etc/rsyslog.d``"
+ },
+ "config_filename": {
+ "type": "string",
+ "description": "The name of the rsyslog configuration file. Default: ``20-cloud-config.conf``"
+ },
+ "configs": {
+ "type": "array",
+ "description": "Each entry in ``configs`` is either a string or an object. Each config entry contains a configuration string and a file to write it to. For config entries that are an object, ``filename`` sets the target filename and ``content`` specifies the config string to write. For config entries that are only a string, the string is used as the config string to write. If the filename to write the config to is not specified, the value of the ``config_filename`` key is used. A file with the selected filename will be written inside the directory specified by ``config_dir``.",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "additionalProperties": false,
+ "required": ["content"],
+ "properties": {
+ "filename": {
+ "type": "string"
+ },
+ "content": {
+ "type": "string"
+ }
+ }
+ }
+ ]
+ }
+ },
+ "remotes": {
+ "type": "object",
+ "description": "Each key is the name for an rsyslog remote entry. Each value holds the contents of the remote config for rsyslog. The config consists of the following parts:\n\n- filter for log messages (defaults to ``*.*``)\n\n- optional leading ``@`` or ``@@``, indicating udp and tcp respectively (defaults to ``@``, for udp)\n\n- ipv4 or ipv6 hostname or address. ipv6 addresses must be in ``[::1]`` format, (e.g. ``@[fd00::1]:514``)\n\n- optional port number (defaults to ``514``)\n\nThis module will provide sane defaults for any part of the remote entry that is not specified, so in most cases remote hosts can be specified just using ``<name>: <address>``."
+ },
+ "service_reload_command": {
+ "description": "The command to use to reload the rsyslog service after the config has been updated. If this is set to ``auto``, then an appropriate command for the distro will be used. This is the default behavior. To manually set the command, use a list of command args (e.g. ``[systemctl, restart, rsyslog]``).",
+ "oneOf": [
+ {"enum": ["auto"]},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ }
+ }
+ }
+ }
+ },
+ "cc_runcmd": {
+ "type": "object",
+ "properties": {
+ "runcmd": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "array", "items": {"type": "string"}},
+ {"type": "string"},
+ {"type": "null"}
+ ]
+ },
+ "minItems": 1
+ }
+ }
+ },
+ "cc_salt_minion": {
+ "type": "object",
+ "properties": {
+ "salt_minion": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "pkg_name": {
+ "type": "string",
+ "description": "Package name to install. Default: ``salt-minion``"
+ },
+ "service_name": {
+ "type": "string",
+ "description": "Service name to enable. Default: ``salt-minion``"
+ },
+ "config_dir": {
+ "type": "string",
+ "description": "Directory to write config files to. Default: ``/etc/salt``"
+ },
+ "conf": {
+ "type": "object",
+ "description": "Configuration to be written to `config_dir`/minion"
+ },
+ "grains": {
+ "type": "object",
+ "description": "Configuration to be written to `config_dir`/grains"
+ },
+ "public_key": {
+ "type": "string",
+ "description": "Public key to be used by the salt minion"
+ },
+ "private_key": {
+ "type": "string",
+ "description": "Private key to be used by salt minion"
+ },
+ "pki_dir": {
+ "type": "string",
+ "description": "Directory to write key files. Default: `config_dir`/pki/minion"
+ }
+ }
+ }
+ }
+ },
+ "cc_scripts_vendor": {
+ "type": "object",
+ "properties": {
+ "vendor_data": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "enabled": {
+ "type": ["boolean", "string"],
+ "description": "Whether vendor data is enabled or not. Use of string for this value is DEPRECATED. Default: ``true``"
+ },
+ "prefix": {
+ "type": ["array", "string"],
+ "items": {"type": ["string", "integer"]},
+ "description": "The command to run before any vendor scripts. Its primary use case is for profiling a script, not to prevent its run"
+ }
+ }
+ }
+ }
+ },
+ "cc_seed_random": {
+ "type": "object",
+ "properties": {
+ "random_seed": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "file": {
+ "type": "string",
+ "default": "/dev/urandom",
+ "description": "File to write random data to. Default: ``/dev/urandom``"
+ },
+ "data": {
+ "type": "string",
+ "description": "This data will be written to ``file`` before data from the datasource. When using a multiline value or specifying binary data, be sure to follow yaml syntax and use the ``|`` and ``!binary`` yaml format specifiers when appropriate"
+ },
+ "encoding": {
+ "type": "string",
+ "default": "raw",
+ "enum": ["raw", "base64", "b64", "gzip", "gz"],
+ "description": "Used to decode ``data`` provided. Allowed values are ``raw``, ``base64``, ``b64``, ``gzip``, or ``gz``. Default: ``raw``"
+ },
+ "command": {
+ "type": "array",
+ "items": {"type": "string"},
+ "description": "Execute this command to seed random. The command will have RANDOM_SEED_FILE in its environment set to the value of ``file`` above."
+ },
+ "command_required": {
+ "type": "boolean",
+ "default": false,
+ "description": "If true, and ``command`` is not available to be run then an exception is raised and cloud-init will record failure. Otherwise, only a debug message is logged. Default: ``false``"
+ }
+ }
+ }
+ }
+ },
+ "cc_set_hostname": {
+ "type": "object",
+ "properties": {
+ "preserve_hostname": {
+ "type": "boolean",
+ "default": false,
+ "description": "If true, the hostname will not be changed. Default: ``false``"
+ },
+ "hostname": {
+ "type": "string",
+ "description": "The hostname to set"
+ },
+ "fqdn": {
+ "type": "string",
+ "description": "The fully qualified domain name to set"
+ },
+ "prefer_fqdn_over_hostname": {
+ "type": "boolean",
+ "description": "If true, the fqdn will be used if it is set. If false, the hostname will be used. If unset, the result is distro-dependent"
+ }
+ }
+ },
+ "cc_set_passwords": {
+ "type": "object",
+ "properties": {
+ "ssh_pwauth": {
+ "oneOf": [
+ {"type": "boolean"},
+ {"type": "string"}
+ ],
+ "description": "Sets whether or not to accept password authentication. ``true`` will enable password auth. ``false`` will disable. Default is to leave the value unchanged. Use of non-boolean values for this field is DEPRECATED and will result in an error in a future version of cloud-init."
+ },
+ "chpasswd": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "expire": {
+ "type": "boolean",
+ "default": true,
+ "description": "Whether to expire all user passwords such that a password will need to be reset on the user's next login. Default: ``true``"
+ },
+ "list": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "pattern": "^.+:.+$"
+ }}
+ ],
+ "minItems": 1,
+ "description": "List of ``username:password`` pairs. Each user will have the corresponding password set. A password can be randomly generated by specifying ``RANDOM`` or ``R`` as a user's password. A hashed password, created by a tool like ``mkpasswd``, can be specified. A regex (``r'\\$(1|2a|2y|5|6)(\\$.+){2}'``) is used to determine if a password value should be treated as a hash.\n\nUse of a multiline string for this field is DEPRECATED and will result in an error in a future version of cloud-init."
+ }
+ }
+ },
+ "password": {
+ "type": "string",
+ "description": "Set the default user's password. Ignored if ``chpasswd`` ``list`` is used"
+ }
+ }
+ },
+ "cc_snap": {
+ "type": "object",
+ "properties": {
+ "snap": {
+ "type": "object",
+ "additionalProperties": false,
+ "minProperties": 1,
+ "properties": {
+ "assertions": {
+ "type": ["object", "array"],
+ "description": "Properly-signed snap assertions which will run before any snap ``commands``.",
+ "items": {"type": "string"},
+ "additionalItems": false,
+ "minItems": 1,
+ "minProperties": 1,
+ "uniqueItems": true,
+ "additionalProperties": {"type": "string"}
+ },
+ "commands": {
+ "type": ["object", "array"],
+ "description": "Snap commands to run on the target system",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "additionalItems": false,
+ "minItems": 1,
+ "minProperties": 1,
+ "additionalProperties": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "cc_spacewalk": {
+ "type": "object",
+ "properties": {
+ "spacewalk": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "server": {
+ "type": "string",
+ "description": "The Spacewalk server to use"
+ },
+ "proxy": {
+ "type": "string",
+ "description": "The proxy to use when connecting to Spacewalk"
+ },
+ "activation_key": {
+ "type": "string",
+ "description": "The activation key to use when registering with Spacewalk"
+ }
+ }
+ }
+ }
+ },
+ "cc_ssh_authkey_fingerprints": {
+ "type": "object",
+ "properties": {
+ "no_ssh_fingerprints": {
+ "type": "boolean",
+ "default": false,
+ "description": "If true, SSH fingerprints will not be written. Default: ``false``"
+ },
+ "authkey_hash": {
+ "type": "string",
+ "default": "sha256",
+ "description": "The hash type to use when generating SSH fingerprints. Default: ``sha256``"
+ }
+ }
+ },
+ "cc_ssh_import_id": {
+ "type": "object",
+ "properties": {
+ "ssh_import_id": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "description": "The SSH public key to import"
+ }
+ }
+ }
+ },
+ "cc_ssh": {
+ "type": "object",
+ "properties": {
+ "ssh_keys": {
+ "type": "object",
+ "description": "A dictionary of entries for the public and private host keys of each desired key type. Entries in the ``ssh_keys`` config dict should have keys in the format ``<key type>_private``, ``<key type>_public``, and, optionally, ``<key type>_certificate``, e.g. ``rsa_private: <key>``, ``rsa_public: <key>``, and ``rsa_certificate: <key>``. Not all key types have to be specified, ones left unspecified will not be used. If this config option is used, then separate keys will not be automatically generated. In order to specify multiline private host keys and certificates, use yaml multiline syntax.",
+ "patternProperties": {
+ "^(dsa|ecdsa|ed25519|rsa)_(public|private|certificate)$": {
+ "label": "<key_type>",
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
+ },
+ "ssh_authorized_keys": {
+ "type": "array",
+ "minItems": 1,
+ "description": "The SSH public keys to add to ``.ssh/authorized_keys`` in the default user's home directory",
+ "items": {
+ "type": "string"
+ }
+ },
+ "ssh_deletekeys" : {
+ "type": "boolean",
+ "default": true,
+ "description": "Remove host SSH keys. This prevents re-use of a private host key from an image with default host SSH keys. Default: ``true``"
+ },
+ "ssh_genkeytypes": {
+ "type": "array",
+ "description": "The SSH key types to generate. Default: ``[rsa, dsa, ecdsa, ed25519]``",
+ "default": ["dsa", "ecdsa", "ed25519", "rsa"],
+ "minItems": 1,
+ "items": {
+ "type": "string",
+ "enum": ["dsa", "ecdsa", "ed25519", "rsa"]
+ }
+ },
+ "disable_root": {
+ "type": "boolean",
+ "default": true,
+ "description": "Disable root login. Default: ``true``"
+ },
+ "disable_root_opts": {
+ "type": "string",
+ "default": "``no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo 'Please login as the user \\\"$USER\\\" rather than the user \\\"$DISABLE_USER\\\".';echo;sleep 10;exit 142\"``",
+ "description": "Disable root login options. If ``disable_root_opts`` is specified and contains the string ``$USER``, it will be replaced with the username of the default user. Default: ``no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo 'Please login as the user \\\"$USER\\\" rather than the user \\\"$DISABLE_USER\\\".';echo;sleep 10;exit 142\"``"
+ },
+ "allow_public_ssh_keys": {
+ "type": "boolean",
+ "default": true,
+ "description": "If ``true``, will import the public SSH keys from the datasource's metadata to the user's ``.ssh/authorized_keys`` file. Default: ``true``"
+ },
+ "ssh_quiet_keygen": {
+ "type": "boolean",
+ "default": false,
+ "description": "If ``true``, will suppress the output of key generation to the console. Default: ``false``"
+ },
+ "ssh_publish_hostkeys": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "default": true,
+ "description": "If true, will read host keys from ``/etc/ssh/*.pub`` and publish them to the datasource (if supported). Default: ``true``"
+ },
+ "blacklist": {
+ "type": "array",
+ "description": "The SSH key types to ignore when publishing. Default: ``[dsa]``",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "cc_timezone": {
+ "type": "object",
+ "properties": {
+ "timezone": {
+ "type": "string",
+ "description": "The timezone to use as represented in /usr/share/zoneinfo"
+ }
+ }
+ },
+ "cc_ubuntu_advantage": {
+ "type": "object",
+ "properties": {
+ "ubuntu_advantage": {
+ "type": "object",
+ "properties": {
+ "enable": {
+ "type": "array",
+ "items": {"type": "string"},
+ "description": "Optional list of ubuntu-advantage services to enable. Any of: cc-eal, cis, esm-infra, fips, fips-updates, livepatch. By default, a given contract token will automatically enable a number of services, use this list to supplement which services should additionally be enabled. Any service unavailable on a given Ubuntu release or unentitled in a given contract will remain disabled."
+ },
+ "token": {
+ "type": "string",
+ "description": "Required contract token obtained from https://ubuntu.com/advantage to attach."
+ }
+ },
+ "required": ["token"],
+ "additionalProperties": false
+ }
+ }
+ },
+ "cc_ubuntu_drivers": {
+ "type": "object",
+ "properties": {
+ "drivers": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "nvidia": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "license-accepted"
+ ],
+ "properties": {
+ "license-accepted": {
+ "type": "boolean",
+ "description": "Do you accept the NVIDIA driver license?"
+ },
+ "version": {
+ "type": "string",
+ "description": "The version of the driver to install (e.g. \"390\", \"410\"). Defaults to the latest version."
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "cc_update_etc_hosts": {
+ "type": "object",
+ "properties": {
+ "manage_etc_hosts": {
+ "default": false,
+ "description": "Whether to manage ``/etc/hosts`` on the system. If ``true``, render the hosts file using ``/etc/cloud/templates/hosts.tmpl`` replacing ``$hostname`` and ``$fqdn``. If ``localhost``, append a ``127.0.1.1`` entry that resolves from FQDN and hostname every boot. Default: ``false``. DEPRECATED value ``template`` will be dropped, use ``true`` instead.",
+ "enum": [true, false, "template", "localhost"]
+ },
+ "fqdn": {
+ "type": "string",
+ "description": "Optional fully qualified domain name to use when updating ``/etc/hosts``. Preferred over ``hostname`` if both are provided. In absence of ``hostname`` and ``fqdn`` in cloud-config, the ``local-hostname`` value will be used from datasource metadata."
+ },
+ "hostname": {
+ "type": "string",
+ "description": "Hostname to set when rendering ``/etc/hosts``. If ``fqdn`` is set, the hostname extracted from ``fqdn`` overrides ``hostname``."
+ }
+ }
+ },
+ "cc_update_hostname": {
+ "type": "object",
+ "properties": {
+ "preserve_hostname": {
+ "type": "boolean",
+ "default": false,
+ "description": "Do not update system hostname when ``true``. Default: ``false``."
+ },
+ "prefer_fqdn_over_hostname": {
+ "type": "boolean",
+ "default": null,
+ "description": "By default, it is distro-dependent whether cloud-init uses the short hostname or fully qualified domain name when both ``local-hostname`` and ``fqdn`` are present in instance metadata. When set ``true``, use fully qualified domain name if present as hostname instead of short hostname. When set ``false``, use ``hostname`` config value if present, otherwise fallback to ``fqdn``."
+ }
+ }
+ },
+ "cc_users_groups": {
+ "type": "object",
+ "properties": {
+ "groups": {
+ "type": ["string", "object", "array"],
+ "hidden": ["patternProperties"],
+ "$ref": "#/$defs/users_groups.groups_by_groupname",
+ "items": {
+ "type": ["string", "object"],
+ "$ref": "#/$defs/users_groups.groups_by_groupname"
+ },
+ "minItems": 1
+ },
+ "user": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "object", "$ref": "#/$defs/users_groups.user"}
+ ],
+ "description": "The ``user`` dictionary values override the ``default_user`` configuration from ``/etc/cloud/cloud.cfg``. The ``user`` dictionary keys supported for the default_user are the same as the ``users`` schema. DEPRECATED: the string type will be removed in a future release. Use ``users`` instead."
+ },
+ "users": {
+ "type": ["string", "array", "object"],
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}},
+ {"type": "object", "$ref": "#/$defs/users_groups.user"}
+ ]
+ },
+ "minItems": 1
+ }
+ }
+ },
+ "cc_write_files": {
+ "type": "object",
+ "properties": {
+ "write_files": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "path": {
+ "type": "string",
+ "description": "Path of the file to which ``content`` is decoded and written"
+ },
+ "content": {
+ "type": "string",
+ "default": "",
+ "description": "Optional content to write to the provided ``path``. When content is present and encoding is not 'text/plain', decode the content prior to writing. Default: ``''``"
+ },
+ "owner": {
+ "type": "string",
+ "default": "root:root",
+ "description": "Optional owner:group to chown on the file. Default: ``root:root``"
+ },
+ "permissions": {
+ "type": "string",
+ "default": "0o644",
+ "description": "Optional file permissions to set on ``path`` represented as an octal string '0###'. Default: ``0o644``"
+ },
+ "encoding": {
+ "type": "string",
+ "default": "text/plain",
+ "enum": ["gz", "gzip", "gz+base64", "gzip+base64", "gz+b64", "gzip+b64", "b64", "base64", "text/plain"],
+ "description": "Optional encoding type of the content. Default is ``text/plain`` and no content decoding is performed. Supported encoding types are: gz, gzip, gz+base64, gzip+base64, gz+b64, gzip+b64, b64, base64"
+ },
+ "append": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether to append ``content`` to existing file if ``path`` exists. Default: ``false``."
+ },
+ "defer": {
+ "type": "boolean",
+ "default": false,
+ "description": "Defer writing the file until 'final' stage, after users were created, and packages were installed. Default: ``false``."
+ }
+ },
+ "required": ["path"],
+ "additionalProperties": false
+ },
+ "minItems": 1
+ }
+ }
+ },
+ "cc_yum_add_repo": {
+ "type": "object",
+ "properties": {
+ "yum_repo_dir": {
+ "type": "string",
+ "default": "/etc/yum.repos.d",
+ "description": "The repo parts directory where individual yum repo config files will be written. Default: ``/etc/yum.repos.d``"
+ },
+ "yum_repos": {
+ "type": "object",
+ "minProperties": 1,
+ "patternProperties": {
+ "^[0-9a-zA-Z -_]+$": {
+ "label": "<repo_name>",
+ "type": "object",
+ "description": "Object keyed on unique yum repo IDs. Each key is used to write a yum repo config file at ``yum_repo_dir``/<repo_key_id>.repo.",
+ "properties": {
+ "baseurl": {
+ "type": "string",
+ "format": "uri",
+ "description": "URL to the directory where the yum repository's 'repodata' directory lives"
+ },
+ "name": {
+ "type": "string",
+ "description": "Optional human-readable name of the yum repo."
+ },
+ "enabled": {
+ "type": "boolean",
+ "default": true,
+ "description": "Whether to enable the repo. Default: ``true``."
+ }
+ },
+ "patternProperties": {
+ "^[0-9a-zA-Z_]+$": {
+ "label": "<yum_config_option>",
+ "oneOf": [
+ {"type": "integer"},
+ {"type": "boolean"},
+ {"type": "string"}
+ ],
+ "description": "Any supported yum repository configuration options will be written to the yum repo config file. See: man yum.conf"
+ }
+ },
+ "required": ["baseurl"]
+ }
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "cc_zypper_add_repo": {
+ "type": "object",
+ "properties": {
+ "zypper": {
+ "type": "object",
+ "properties": {
+ "repos": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "The unique id of the repo, used when writing /etc/zypp/repos.d/<id>.repo."
+ },
+ "baseurl": {
+ "type": "string",
+ "format": "uri",
+ "description": "The base repository URL"
+ }
+ },
+ "required": [
+ "id",
+ "baseurl"
+ ],
+ "additionalProperties": true
+ },
+ "minItems": 1
+ },
+ "config": {
+ "type": "object",
+ "description": "Any supported zypp.conf key is written to ``/etc/zypp/zypp.conf``"
+ }
+ },
+ "minProperties": 1,
+ "additionalProperties": false
+ }
+ }
+ }
+ },
+ "allOf": [
+ { "$ref": "#/$defs/cc_apk_configure" },
+ { "$ref": "#/$defs/cc_apt_configure" },
+ { "$ref": "#/$defs/cc_apt_pipelining" },
+ { "$ref": "#/$defs/cc_bootcmd" },
+ { "$ref": "#/$defs/cc_byobu" },
+ { "$ref": "#/$defs/cc_ca_certs" },
+ { "$ref": "#/$defs/cc_chef" },
+ { "$ref": "#/$defs/cc_debug" },
+ { "$ref": "#/$defs/cc_disable_ec2_metadata" },
+ { "$ref": "#/$defs/cc_disk_setup" },
+ { "$ref": "#/$defs/cc_fan" },
+ { "$ref": "#/$defs/cc_final_message"},
+ { "$ref": "#/$defs/cc_growpart"},
+ { "$ref": "#/$defs/cc_grub_dpkg"},
+ { "$ref": "#/$defs/cc_install_hotplug"},
+ { "$ref": "#/$defs/cc_keyboard" },
+ { "$ref": "#/$defs/cc_keys_to_console" },
+ { "$ref": "#/$defs/cc_landscape" },
+ { "$ref": "#/$defs/cc_locale" },
+ { "$ref": "#/$defs/cc_lxd" },
+ { "$ref": "#/$defs/cc_mcollective" },
+ { "$ref": "#/$defs/cc_migrator" },
+ { "$ref": "#/$defs/cc_mounts" },
+ { "$ref": "#/$defs/cc_ntp" },
+ { "$ref": "#/$defs/cc_package_update_upgrade_install" },
+ { "$ref": "#/$defs/cc_phone_home" },
+ { "$ref": "#/$defs/cc_power_state_change"},
+ { "$ref": "#/$defs/cc_puppet"},
+ { "$ref": "#/$defs/cc_resizefs"},
+ { "$ref": "#/$defs/cc_resolv_conf"},
+ { "$ref": "#/$defs/cc_rh_subscription"},
+ { "$ref": "#/$defs/cc_rsyslog"},
+ { "$ref": "#/$defs/cc_runcmd"},
+ { "$ref": "#/$defs/cc_salt_minion"},
+ { "$ref": "#/$defs/cc_scripts_vendor"},
+ { "$ref": "#/$defs/cc_seed_random"},
+ { "$ref": "#/$defs/cc_set_hostname"},
+ { "$ref": "#/$defs/cc_set_passwords"},
+ { "$ref": "#/$defs/cc_snap"},
+ { "$ref": "#/$defs/cc_spacewalk"},
+ { "$ref": "#/$defs/cc_ssh_authkey_fingerprints"},
+ { "$ref": "#/$defs/cc_ssh_import_id"},
+ { "$ref": "#/$defs/cc_ssh"},
+ { "$ref": "#/$defs/cc_timezone"},
+ { "$ref": "#/$defs/cc_ubuntu_advantage"},
+ { "$ref": "#/$defs/cc_ubuntu_drivers"},
+ { "$ref": "#/$defs/cc_update_etc_hosts"},
+ { "$ref": "#/$defs/cc_update_hostname"},
+ { "$ref": "#/$defs/cc_users_groups"},
+ { "$ref": "#/$defs/cc_write_files"},
+ { "$ref": "#/$defs/cc_yum_add_repo"},
+ { "$ref": "#/$defs/cc_zypper_add_repo"}
+ ]
+}
diff --git a/cloudinit/config/schemas/versions.schema.cloud-config.json b/cloudinit/config/schemas/versions.schema.cloud-config.json
new file mode 100644
index 00000000..4ff3b4d1
--- /dev/null
+++ b/cloudinit/config/schemas/versions.schema.cloud-config.json
@@ -0,0 +1,18 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "$id": "https://raw.githubusercontent.com/canonical/cloud-init/main/cloudinit/config/schemas/versions.schema.cloud-config.json",
+ "oneOf": [
+ {
+ "allOf": [
+ {
+ "properties": {
+ "version": {
+ "enum": ["22.2", "v1"]
+ }
+ }
+ },
+ {"$ref": "./schema-cloud-config-v1.json"}
+ ]
+ }
+ ]
+}