summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChad Smith <chad.smith@canonical.com>2018-07-09 14:16:03 -0600
committergit-ubuntu importer <ubuntu-devel-discuss@lists.ubuntu.com>2018-07-09 21:05:08 +0000
commitc14693fa75c77e6b4f824a687fb52e0eee7c6d67 (patch)
tree46adf92b5e89173d9a366b106faa672fb4699ccc
parent7140b42117422de6d4567b0284fbbcc12cecd759 (diff)
downloadcloud-init-git-c14693fa75c77e6b4f824a687fb52e0eee7c6d67.tar.gz
18.3-9-g2e62cb8a-0ubuntu1 (patches unapplied)
Imported using git-ubuntu import.
-rw-r--r--cloudinit/config/cc_bootcmd.py8
-rw-r--r--cloudinit/config/cc_runcmd.py5
-rw-r--r--cloudinit/config/cc_write_files.py7
-rw-r--r--cloudinit/event.py17
-rw-r--r--cloudinit/gpg.py52
-rw-r--r--cloudinit/sources/__init__.py78
-rw-r--r--cloudinit/sources/tests/test_init.py83
-rw-r--r--cloudinit/stages.py14
-rw-r--r--cloudinit/tests/test_gpg.py54
-rw-r--r--cloudinit/tests/test_stages.py231
-rw-r--r--cloudinit/tests/test_util.py69
-rw-r--r--cloudinit/util.py28
-rw-r--r--debian/changelog16
-rw-r--r--doc/examples/cloud-config-run-cmds.txt5
-rw-r--r--doc/examples/cloud-config.txt5
-rw-r--r--doc/rtd/topics/format.rst2
-rw-r--r--integration-requirements.txt2
-rw-r--r--tests/cloud_tests/testcases/modules/salt_minion.py38
-rw-r--r--tests/cloud_tests/testcases/modules/salt_minion.yaml49
-rw-r--r--tests/unittests/test_datasource/test_azure_helper.py4
-rwxr-xr-xtools/run-container18
21 files changed, 656 insertions, 129 deletions
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index db64f0a6..6813f534 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -42,7 +42,13 @@ schema = {
.. note::
bootcmd should only be used for things that could not be done later
- in the boot process."""),
+ in the boot process.
+
+ .. note::
+
+ when writing files, do not use /tmp dir as it races with
+ systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead.
+ """),
'distros': distros,
'examples': [dedent("""\
bootcmd:
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index b6f6c807..1f75d6c5 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -42,6 +42,11 @@ schema = {
all commands must be proper yaml, so you have to quote any characters
yaml would eat (':' can be problematic)
+
+ .. note::
+
+ when writing files, do not use /tmp dir as it races with
+ systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead.
"""),
'distros': distros,
'examples': [dedent("""\
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 54ae3a68..31d1db61 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -15,9 +15,14 @@ binary gzip data can be specified and will be decoded before being written.
.. note::
if multiline data is provided, care should be taken to ensure that it
- follows yaml formatting standargs. to specify binary data, use the yaml
+ follows yaml formatting standards. to specify binary data, use the yaml
option ``!!binary``
+.. note::
+ Do not write files under /tmp during boot because of a race with
+ systemd-tmpfiles-clean that can cause temp files to get cleaned during
+ the early boot process. Use /run/somedir instead to avoid race LP:1707222.
+
**Internal name:** ``cc_write_files``
**Module frequency:** per instance
diff --git a/cloudinit/event.py b/cloudinit/event.py
new file mode 100644
index 00000000..f7b311fb
--- /dev/null
+++ b/cloudinit/event.py
@@ -0,0 +1,17 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Classes and functions related to event handling."""
+
+
+# Event types which can generate maintenance requests for cloud-init.
+class EventType(object):
+ BOOT = "System boot"
+ BOOT_NEW_INSTANCE = "New instance first boot"
+
+ # TODO: Cloud-init will grow support for the following event types:
+ # UDEV
+ # METADATA_CHANGE
+ # USER_REQUEST
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py
index d58d73e0..7fe17a2e 100644
--- a/cloudinit/gpg.py
+++ b/cloudinit/gpg.py
@@ -10,6 +10,8 @@
from cloudinit import log as logging
from cloudinit import util
+import time
+
LOG = logging.getLogger(__name__)
@@ -25,16 +27,46 @@ def export_armour(key):
return armour
-def recv_key(key, keyserver):
- """Receive gpg key from the specified keyserver"""
- LOG.debug('Receive gpg key "%s"', key)
- try:
- util.subp(["gpg", "--keyserver", keyserver, "--recv", key],
- capture=True)
- except util.ProcessExecutionError as error:
- raise ValueError(('Failed to import key "%s" '
- 'from server "%s" - error %s') %
- (key, keyserver, error))
+def recv_key(key, keyserver, retries=(1, 1)):
+ """Receive gpg key from the specified keyserver.
+
+ Retries are done by default because keyservers can be unreliable.
+ Additionally, there is no way to determine the difference between
+ a non-existent key and a failure. In both cases gpg (at least 2.2.4)
+ exits with status 2 and stderr: "keyserver receive failed: No data"
+ It is assumed that a key provided to cloud-init exists on the keyserver
+ so re-trying makes better sense than failing.
+
+ @param key: a string key fingerprint (as passed to gpg --recv-keys).
+ @param keyserver: the keyserver to request keys from.
+ @param retries: an iterable of sleep lengths for retries.
+ Use None to indicate no retries."""
+ LOG.debug("Importing key '%s' from keyserver '%s'", key, keyserver)
+ cmd = ["gpg", "--keyserver=%s" % keyserver, "--recv-keys", key]
+ if retries is None:
+ retries = []
+ trynum = 0
+ error = None
+ sleeps = iter(retries)
+ while True:
+ trynum += 1
+ try:
+ util.subp(cmd, capture=True)
+ LOG.debug("Imported key '%s' from keyserver '%s' on try %d",
+ key, keyserver, trynum)
+ return
+ except util.ProcessExecutionError as e:
+ error = e
+ try:
+ naplen = next(sleeps)
+ LOG.debug(
+ "Import failed with exit code %d, will try again in %ss",
+ error.exit_code, naplen)
+ time.sleep(naplen)
+ except StopIteration:
+ raise ValueError(
+ ("Failed to import key '%s' from keyserver '%s' "
+ "after %d tries: %s") % (key, keyserver, trynum, error))
def delete_key(key):
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 90d74575..f424316a 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -19,6 +19,7 @@ from cloudinit.atomic_helper import write_json
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import net
+from cloudinit.event import EventType
from cloudinit import type_utils
from cloudinit import user_data as ud
from cloudinit import util
@@ -102,6 +103,25 @@ class DataSource(object):
url_timeout = 10 # timeout for each metadata url read attempt
url_retries = 5 # number of times to retry url upon 404
+ # The datasource defines a list of supported EventTypes during which
+ # the datasource can react to changes in metadata and regenerate
+ # network configuration on metadata changes.
+ # A datasource which supports writing network config on each system boot
+ # would set update_events = {'network': [EventType.BOOT]}
+
+ # Default: generate network config on new instance id (first boot).
+ update_events = {'network': [EventType.BOOT_NEW_INSTANCE]}
+
+ # N-tuple listing default values for any metadata-related class
+ # attributes cached on an instance by a process_data run. These attribute
+ # values are reset via clear_cached_attrs during any update_metadata call.
+ cached_attr_defaults = (
+ ('ec2_metadata', UNSET), ('network_json', UNSET),
+ ('metadata', {}), ('userdata', None), ('userdata_raw', None),
+ ('vendordata', None), ('vendordata_raw', None))
+
+ _dirty_cache = False
+
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
self.distro = distro
@@ -134,11 +154,31 @@ class DataSource(object):
'region': self.region,
'availability-zone': self.availability_zone}}
+ def clear_cached_attrs(self, attr_defaults=()):
+ """Reset any cached metadata attributes to datasource defaults.
+
+ @param attr_defaults: Optional tuple of (attr, value) pairs to
+ set instead of cached_attr_defaults.
+ """
+ if not self._dirty_cache:
+ return
+ if attr_defaults:
+ attr_values = attr_defaults
+ else:
+ attr_values = self.cached_attr_defaults
+
+ for attribute, value in attr_values:
+ if hasattr(self, attribute):
+ setattr(self, attribute, value)
+ if not attr_defaults:
+ self._dirty_cache = False
+
def get_data(self):
"""Datasources implement _get_data to setup metadata and userdata_raw.
Minimally, the datasource should return a boolean True on success.
"""
+ self._dirty_cache = True
return_value = self._get_data()
json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
if not return_value:
@@ -174,6 +214,7 @@ class DataSource(object):
return return_value
def _get_data(self):
+ """Walk metadata sources, process crawled data and save attributes."""
raise NotImplementedError(
'Subclasses of DataSource must implement _get_data which'
' sets self.metadata, vendordata_raw and userdata_raw.')
@@ -416,6 +457,41 @@ class DataSource(object):
def get_package_mirror_info(self):
return self.distro.get_package_mirror_info(data_source=self)
+ def update_metadata(self, source_event_types):
+ """Refresh cached metadata if the datasource supports this event.
+
+ The datasource has a list of update_events which
+ trigger refreshing all cached metadata as well as refreshing the
+ network configuration.
+
+ @param source_event_types: List of EventTypes which may trigger a
+ metadata update.
+
+ @return True if the datasource did successfully update cached metadata
+ due to source_event_type.
+ """
+ supported_events = {}
+ for event in source_event_types:
+ for update_scope, update_events in self.update_events.items():
+ if event in update_events:
+ if not supported_events.get(update_scope):
+ supported_events[update_scope] = []
+ supported_events[update_scope].append(event)
+ for scope, matched_events in supported_events.items():
+ LOG.debug(
+ "Update datasource metadata and %s config due to events: %s",
+ scope, ', '.join(matched_events))
+ # Each datasource has a cached config property which needs clearing
+ # Once cleared that config property will be regenerated from
+ # current metadata.
+ self.clear_cached_attrs((('_%s_config' % scope, UNSET),))
+ if supported_events:
+ self.clear_cached_attrs()
+ result = self.get_data()
+ if result:
+ return True
+ return False
+
def check_instance_id(self, sys_cfg):
# quickly (local check only) if self.instance_id is still
return False
@@ -520,7 +596,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
with myrep:
LOG.debug("Seeing if we can get any data from %s", cls)
s = cls(sys_cfg, distro, paths)
- if s.get_data():
+ if s.update_metadata([EventType.BOOT_NEW_INSTANCE]):
myrep.message = "found %s data from %s" % (mode, name)
return (s, type_utils.obj_name(cls))
except Exception:
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index d5bc98a4..dcd221be 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -5,10 +5,11 @@ import os
import six
import stat
+from cloudinit.event import EventType
from cloudinit.helpers import Paths
from cloudinit import importer
from cloudinit.sources import (
- INSTANCE_JSON_FILE, DataSource)
+ INSTANCE_JSON_FILE, DataSource, UNSET)
from cloudinit.tests.helpers import CiTestCase, skipIf, mock
from cloudinit.user_data import UserDataProcessor
from cloudinit import util
@@ -381,3 +382,83 @@ class TestDataSource(CiTestCase):
get_args(grandchild.get_hostname), # pylint: disable=W1505
'%s does not implement DataSource.get_hostname params'
% grandchild)
+
+ def test_clear_cached_attrs_resets_cached_attr_class_attributes(self):
+ """Class attributes listed in cached_attr_defaults are reset."""
+ count = 0
+ # Setup values for all cached class attributes
+ for attr, value in self.datasource.cached_attr_defaults:
+ setattr(self.datasource, attr, count)
+ count += 1
+ self.datasource._dirty_cache = True
+ self.datasource.clear_cached_attrs()
+ for attr, value in self.datasource.cached_attr_defaults:
+ self.assertEqual(value, getattr(self.datasource, attr))
+
+ def test_clear_cached_attrs_noops_on_clean_cache(self):
+ """Class attributes listed in cached_attr_defaults are reset."""
+ count = 0
+ # Setup values for all cached class attributes
+ for attr, _ in self.datasource.cached_attr_defaults:
+ setattr(self.datasource, attr, count)
+ count += 1
+ self.datasource._dirty_cache = False # Fake clean cache
+ self.datasource.clear_cached_attrs()
+ count = 0
+ for attr, _ in self.datasource.cached_attr_defaults:
+ self.assertEqual(count, getattr(self.datasource, attr))
+ count += 1
+
+ def test_clear_cached_attrs_skips_non_attr_class_attributes(self):
+ """Skip any cached_attr_defaults which aren't class attributes."""
+ self.datasource._dirty_cache = True
+ self.datasource.clear_cached_attrs()
+ for attr in ('ec2_metadata', 'network_json'):
+ self.assertFalse(hasattr(self.datasource, attr))
+
+ def test_clear_cached_attrs_of_custom_attrs(self):
+ """Custom attr_values can be passed to clear_cached_attrs."""
+ self.datasource._dirty_cache = True
+ cached_attr_name = self.datasource.cached_attr_defaults[0][0]
+ setattr(self.datasource, cached_attr_name, 'himom')
+ self.datasource.myattr = 'orig'
+ self.datasource.clear_cached_attrs(
+ attr_defaults=(('myattr', 'updated'),))
+ self.assertEqual('himom', getattr(self.datasource, cached_attr_name))
+ self.assertEqual('updated', self.datasource.myattr)
+
+ def test_update_metadata_only_acts_on_supported_update_events(self):
+ """update_metadata won't get_data on unsupported update events."""
+ self.assertEqual(
+ {'network': [EventType.BOOT_NEW_INSTANCE]},
+ self.datasource.update_events)
+
+ def fake_get_data():
+ raise Exception('get_data should not be called')
+
+ self.datasource.get_data = fake_get_data
+ self.assertFalse(
+ self.datasource.update_metadata(
+ source_event_types=[EventType.BOOT]))
+
+ def test_update_metadata_returns_true_on_supported_update_event(self):
+ """update_metadata returns get_data response on supported events."""
+
+ def fake_get_data():
+ return True
+
+ self.datasource.get_data = fake_get_data
+ self.datasource._network_config = 'something'
+ self.datasource._dirty_cache = True
+ self.assertTrue(
+ self.datasource.update_metadata(
+ source_event_types=[
+ EventType.BOOT, EventType.BOOT_NEW_INSTANCE]))
+ self.assertEqual(UNSET, self.datasource._network_config)
+ self.assertIn(
+ "DEBUG: Update datasource metadata and network config due to"
+ " events: New instance first boot",
+ self.logs.getvalue())
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 286607bf..c132b57d 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -22,6 +22,8 @@ from cloudinit.handlers import cloud_config as cc_part
from cloudinit.handlers import shell_script as ss_part
from cloudinit.handlers import upstart_job as up_part
+from cloudinit.event import EventType
+
from cloudinit import cloud
from cloudinit import config
from cloudinit import distros
@@ -648,10 +650,14 @@ class Init(object):
except Exception as e:
LOG.warning("Failed to rename devices: %s", e)
- if (self.datasource is not NULL_DATA_SOURCE and
- not self.is_new_instance()):
- LOG.debug("not a new instance. network config is not applied.")
- return
+ if self.datasource is not NULL_DATA_SOURCE:
+ if not self.is_new_instance():
+ if not self.datasource.update_metadata([EventType.BOOT]):
+ LOG.debug(
+ "No network config applied. Neither a new instance"
+ " nor datasource network update on '%s' event",
+ EventType.BOOT)
+ return
LOG.info("Applying network configuration from %s bringup=%s: %s",
src, bring_up, netcfg)
diff --git a/cloudinit/tests/test_gpg.py b/cloudinit/tests/test_gpg.py
new file mode 100644
index 00000000..0562b966
--- /dev/null
+++ b/cloudinit/tests/test_gpg.py
@@ -0,0 +1,54 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Test gpg module."""
+
+from cloudinit import gpg
+from cloudinit import util
+from cloudinit.tests.helpers import CiTestCase
+
+import mock
+
+
+@mock.patch("cloudinit.gpg.time.sleep")
+@mock.patch("cloudinit.gpg.util.subp")
+class TestReceiveKeys(CiTestCase):
+ """Test the recv_key method."""
+
+ def test_retries_on_subp_exc(self, m_subp, m_sleep):
+ """retry should be done on gpg receive keys failure."""
+ retries = (1, 2, 4)
+ my_exc = util.ProcessExecutionError(
+ stdout='', stderr='', exit_code=2, cmd=['mycmd'])
+ m_subp.side_effect = (my_exc, my_exc, ('', ''))
+ gpg.recv_key("ABCD", "keyserver.example.com", retries=retries)
+ self.assertEqual([mock.call(1), mock.call(2)], m_sleep.call_args_list)
+
+ def test_raises_error_after_retries(self, m_subp, m_sleep):
+ """If the final run fails, error should be raised."""
+ naplen = 1
+ keyid, keyserver = ("ABCD", "keyserver.example.com")
+ m_subp.side_effect = util.ProcessExecutionError(
+ stdout='', stderr='', exit_code=2, cmd=['mycmd'])
+ with self.assertRaises(ValueError) as rcm:
+ gpg.recv_key(keyid, keyserver, retries=(naplen,))
+ self.assertIn(keyid, str(rcm.exception))
+ self.assertIn(keyserver, str(rcm.exception))
+ m_sleep.assert_called_with(naplen)
+
+ def test_no_retries_on_none(self, m_subp, m_sleep):
+ """retry should not be done if retries is None."""
+ m_subp.side_effect = util.ProcessExecutionError(
+ stdout='', stderr='', exit_code=2, cmd=['mycmd'])
+ with self.assertRaises(ValueError):
+ gpg.recv_key("ABCD", "keyserver.example.com", retries=None)
+ m_sleep.assert_not_called()
+
+ def test_expected_gpg_command(self, m_subp, m_sleep):
+ """Verify gpg is called with expected args."""
+ key, keyserver = ("DEADBEEF", "keyserver.example.com")
+ retries = (1, 2, 4)
+ m_subp.return_value = ('', '')
+ gpg.recv_key(key, keyserver, retries=retries)
+ m_subp.assert_called_once_with(
+ ['gpg', '--keyserver=%s' % keyserver, '--recv-keys', key],
+ capture=True)
+ m_sleep.assert_not_called()
diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py
new file mode 100644
index 00000000..94b6b255
--- /dev/null
+++ b/cloudinit/tests/test_stages.py
@@ -0,0 +1,231 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests related to cloudinit.stages module."""
+
+import os
+
+from cloudinit import stages
+from cloudinit import sources
+
+from cloudinit.event import EventType
+from cloudinit.util import write_file
+
+from cloudinit.tests.helpers import CiTestCase, mock
+
+TEST_INSTANCE_ID = 'i-testing'
+
+
+class FakeDataSource(sources.DataSource):
+
+ def __init__(self, paths=None, userdata=None, vendordata=None,
+ network_config=''):
+ super(FakeDataSource, self).__init__({}, None, paths=paths)
+ self.metadata = {'instance-id': TEST_INSTANCE_ID}
+ self.userdata_raw = userdata
+ self.vendordata_raw = vendordata
+ self._network_config = None
+ if network_config: # Permit for None value to setup attribute
+ self._network_config = network_config
+
+ @property
+ def network_config(self):
+ return self._network_config
+
+ def _get_data(self):
+ return True
+
+
+class TestInit(CiTestCase):
+ with_logs = True
+
+ def setUp(self):
+ super(TestInit, self).setUp()
+ self.tmpdir = self.tmp_dir()
+ self.init = stages.Init()
+ # Setup fake Paths for Init to reference
+ self.init._cfg = {'system_info': {
+ 'distro': 'ubuntu', 'paths': {'cloud_dir': self.tmpdir,
+ 'run_dir': self.tmpdir}}}
+ self.init.datasource = FakeDataSource(paths=self.init.paths)
+
+ def test_wb__find_networking_config_disabled(self):
+ """find_networking_config returns no config when disabled."""
+ disable_file = os.path.join(
+ self.init.paths.get_cpath('data'), 'upgraded-network')
+ write_file(disable_file, '')
+ self.assertEqual(
+ (None, disable_file),
+ self.init._find_networking_config())
+
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test_wb__find_networking_config_disabled_by_kernel(self, m_cmdline):
+ """find_networking_config returns when disabled by kernel cmdline."""
+ m_cmdline.return_value = {'config': 'disabled'}
+ self.assertEqual(
+ (None, 'cmdline'),
+ self.init._find_networking_config())
+ self.assertEqual('DEBUG: network config disabled by cmdline\n',
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test_wb__find_networking_config_disabled_by_datasrc(self, m_cmdline):
+ """find_networking_config returns when disabled by datasource cfg."""
+ m_cmdline.return_value = {} # Kernel doesn't disable networking
+ self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
+ 'network': {}} # system config doesn't disable
+
+ self.init.datasource = FakeDataSource(
+ network_config={'config': 'disabled'})
+ self.assertEqual(
+ (None, 'ds'),
+ self.init._find_networking_config())
+ self.assertEqual('DEBUG: network config disabled by ds\n',
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test_wb__find_networking_config_disabled_by_sysconfig(self, m_cmdline):
+ """find_networking_config returns when disabled by system config."""
+ m_cmdline.return_value = {} # Kernel doesn't disable networking
+ self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
+ 'network': {'config': 'disabled'}}
+ self.assertEqual(
+ (None, 'system_cfg'),
+ self.init._find_networking_config())
+ self.assertEqual('DEBUG: network config disabled by system_cfg\n',
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test_wb__find_networking_config_returns_kernel(self, m_cmdline):
+ """find_networking_config returns kernel cmdline config if present."""
+ expected_cfg = {'config': ['fakekernel']}
+ m_cmdline.return_value = expected_cfg
+ self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
+ 'network': {'config': ['fakesys_config']}}
+ self.init.datasource = FakeDataSource(
+ network_config={'config': ['fakedatasource']})
+ self.assertEqual(
+ (expected_cfg, 'cmdline'),
+ self.init._find_networking_config())
+
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test_wb__find_networking_config_returns_system_cfg(self, m_cmdline):
+ """find_networking_config returns system config when present."""
+ m_cmdline.return_value = {} # No kernel network config
+ expected_cfg = {'config': ['fakesys_config']}
+ self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
+ 'network': expected_cfg}
+ self.init.datasource = FakeDataSource(
+ network_config={'config': ['fakedatasource']})
+ self.assertEqual(
+ (expected_cfg, 'system_cfg'),
+ self.init._find_networking_config())
+
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test_wb__find_networking_config_returns_datasrc_cfg(self, m_cmdline):
+ """find_networking_config returns datasource net config if present."""
+ m_cmdline.return_value = {} # No kernel network config
+ # No system config for network in setUp
+ expected_cfg = {'config': ['fakedatasource']}
+ self.init.datasource = FakeDataSource(network_config=expected_cfg)
+ self.assertEqual(
+ (expected_cfg, 'ds'),
+ self.init._find_networking_config())
+
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test_wb__find_networking_config_returns_fallback(self, m_cmdline):
+ """find_networking_config returns fallback config if not defined."""
+ m_cmdline.return_value = {} # Kernel doesn't disable networking
+ # Neither datasource nor system_info disable or provide network
+
+ fake_cfg = {'config': [{'type': 'physical', 'name': 'eth9'}],
+ 'version': 1}
+
+ def fake_generate_fallback():
+ return fake_cfg
+
+ # Monkey patch distro which gets cached on self.init
+ distro = self.init.distro
+ distro.generate_fallback_config = fake_generate_fallback
+ self.assertEqual(
+ (fake_cfg, 'fallback'),
+ self.init._find_networking_config())
+ self.assertNotIn('network config disabled', self.logs.getvalue())
+
+ def test_apply_network_config_disabled(self):
+ """Log when network is disabled by upgraded-network."""
+ disable_file = os.path.join(
+ self.init.paths.get_cpath('data'), 'upgraded-network')
+
+ def fake_network_config():
+ return (None, disable_file)
+
+ self.init._find_networking_config = fake_network_config
+
+ self.init.apply_network_config(True)
+ self.assertIn(
+ 'INFO: network config is disabled by %s' % disable_file,
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.distros.ubuntu.Distro')
+ def test_apply_network_on_new_instance(self, m_ubuntu):
+ """Call distro apply_network_config methods on is_new_instance."""
+ net_cfg = {
+ 'version': 1, 'config': [
+ {'subnets': [{'type': 'dhcp'}], 'type': 'physical',
+ 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]}
+
+ def fake_network_config():
+ return net_cfg, 'fallback'
+
+ self.init._find_networking_config = fake_network_config
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
+ self.init.distro.apply_network_config.assert_called_with(
+ net_cfg, bring_up=True)
+
+ @mock.patch('cloudinit.distros.ubuntu.Distro')
+ def test_apply_network_on_same_instance_id(self, m_ubuntu):
+ """Only call distro.apply_network_config_names on same instance id."""
+ old_instance_id = os.path.join(
+ self.init.paths.get_cpath('data'), 'instance-id')
+ write_file(old_instance_id, TEST_INSTANCE_ID)
+ net_cfg = {
+ 'version': 1, 'config': [
+ {'subnets': [{'type': 'dhcp'}], 'type': 'physical',
+ 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]}
+
+ def fake_network_config():
+ return net_cfg, 'fallback'
+
+ self.init._find_networking_config = fake_network_config
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
+ self.init.distro.apply_network_config.assert_not_called()
+ self.assertIn(
+ 'No network config applied. Neither a new instance'
+ " nor datasource network update on '%s' event" % EventType.BOOT,
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.distros.ubuntu.Distro')
+ def test_apply_network_on_datasource_allowed_event(self, m_ubuntu):
+ """Apply network if datasource.update_metadata permits BOOT event."""
+ old_instance_id = os.path.join(
+ self.init.paths.get_cpath('data'), 'instance-id')
+ write_file(old_instance_id, TEST_INSTANCE_ID)
+ net_cfg = {
+ 'version': 1, 'config': [
+ {'subnets': [{'type': 'dhcp'}], 'type': 'physical',
+ 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]}
+
+ def fake_network_config():
+ return net_cfg, 'fallback'
+
+ self.init._find_networking_config = fake_network_config
+ self.init.datasource = FakeDataSource(paths=self.init.paths)
+ self.init.datasource.update_events = {'network': [EventType.BOOT]}
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
+ self.init.distro.apply_network_config.assert_called_with(
+ net_cfg, bring_up=True)
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index 17853fc7..6a31e505 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -26,8 +26,51 @@ OS_RELEASE_SLES = dedent("""\
CPE_NAME="cpe:/o:suse:sles:12:sp3"\n
""")
+OS_RELEASE_OPENSUSE = dedent("""\
+NAME="openSUSE Leap"
+VERSION="42.3"
+ID=opensuse
+ID_LIKE="suse"
+VERSION_ID="42.3"
+PRETTY_NAME="openSUSE Leap 42.3"
+ANSI_COLOR="0;32"
+CPE_NAME="cpe:/o:opensuse:leap:42.3"
+BUG_REPORT_URL="https://bugs.opensuse.org"
+HOME_URL="https://www.opensuse.org/"
+""")
+
+OS_RELEASE_CENTOS = dedent("""\
+ NAME="CentOS Linux"
+ VERSION="7 (Core)"
+ ID="centos"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="7"
+ PRETTY_NAME="CentOS Linux 7 (Core)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:centos:centos:7"
+ HOME_URL="https://www.centos.org/"
+ BUG_REPORT_URL="https://bugs.centos.org/"
+
+ CENTOS_MANTISBT_PROJECT="CentOS-7"
+ CENTOS_MANTISBT_PROJECT_VERSION="7"
+ REDHAT_SUPPORT_PRODUCT="centos"
+ REDHAT_SUPPORT_PRODUCT_VERSION="7"
+""")
+
+OS_RELEASE_DEBIAN = dedent("""\
+ PRETTY_NAME="Debian GNU/Linux 9 (stretch)"
+ NAME="Debian GNU/Linux"
+ VERSION_ID="9"
+ VERSION="9 (stretch)"
+ ID=debian
+ HOME_URL="https://www.debian.org/"
+ SUPPORT_URL="https://www.debian.org/support"
+ BUG_REPORT_URL="https://bugs.debian.org/"
+""")
+
OS_RELEASE_UBUNTU = dedent("""\
NAME="Ubuntu"\n
+ # comment test
VERSION="16.04.3 LTS (Xenial Xerus)"\n
ID=ubuntu\n
ID_LIKE=debian\n
@@ -310,7 +353,31 @@ class TestGetLinuxDistro(CiTestCase):
m_os_release.return_value = OS_RELEASE_UBUNTU
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('ubuntu', '16.04', platform.machine()), dist)
+ self.assertEqual(('ubuntu', '16.04', 'xenial'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_centos(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and release name on CentOS."""
+ m_os_release.return_value = OS_RELEASE_CENTOS
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('centos', '7', 'Core'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_debian(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and release name on Debian."""
+ m_os_release.return_value = OS_RELEASE_DEBIAN
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('debian', '9', 'stretch'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_opensuse(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and machine arch on OpenSUSE."""
+ m_os_release.return_value = OS_RELEASE_OPENSUSE
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('opensuse', '42.3', platform.machine()), dist)
@mock.patch('platform.dist')
def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists):
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 6da95113..d0b0e90a 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -579,16 +579,24 @@ def get_cfg_option_int(yobj, key, default=0):
def get_linux_distro():
distro_name = ''
distro_version = ''
+ flavor = ''
if os.path.exists('/etc/os-release'):
- os_release = load_file('/etc/os-release')
- for line in os_release.splitlines():
- if line.strip().startswith('ID='):
- distro_name = line.split('=')[-1]
- distro_name = distro_name.replace('"', '')
- if line.strip().startswith('VERSION_ID='):
- # Lets hope for the best that distros stay consistent ;)
- distro_version = line.split('=')[-1]
- distro_version = distro_version.replace('"', '')
+ os_release = load_shell_content(load_file('/etc/os-release'))
+ distro_name = os_release.get('ID', '')
+ distro_version = os_release.get('VERSION_ID', '')
+ if 'sles' in distro_name or 'suse' in distro_name:
+ # RELEASE_BLOCKER: We will drop this SLES divergent behavior
+ # before 18.4 so that get_linux_distro returns a named tuple
+ # which will include both version codename and architecture
+ # on all distributions.
+ flavor = platform.machine()
+ else:
+ flavor = os_release.get('VERSION_CODENAME', '')
+ if not flavor:
+ match = re.match(r'[^ ]+ \((?P<codename>[^)]+)\)',
+ os_release.get('VERSION'))
+ if match:
+ flavor = match.groupdict()['codename']
else:
dist = ('', '', '')
try:
@@ -606,7 +614,7 @@ def get_linux_distro():
'expansion may have unexpected results')
return dist
- return (distro_name, distro_version, platform.machine())
+ return (distro_name, distro_version, flavor)
def system_info():
diff --git a/debian/changelog b/debian/changelog
index 4817495b..d6a89e42 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,19 @@
+cloud-init (18.3-9-g2e62cb8a-0ubuntu1) cosmic; urgency=medium
+
+ * New upstream snapshot.
+ - docs: note in rtd about avoiding /tmp when writing files
+ - ubuntu,centos,debian: get_linux_distro to align with platform.dist
+ - Fix boothook docs on environment variable name (INSTANCE_I ->
+ INSTANCE_ID) [Marc Tamsky]
+ - update_metadata: a datasource can support network re-config every boot
+ - tests: drop salt-minion integration test
+ - Retry on failed import of gpg receive keys.
    - tools: Fix run-container when neither source nor binary package requested.
+ - docs: Fix a small spelling error. [Oz N Tiram]
+ - tox: use simplestreams from git repository rather than bzr.
+
+ -- Chad Smith <chad.smith@canonical.com> Mon, 09 Jul 2018 14:16:03 -0600
+
cloud-init (18.3-0ubuntu1) cosmic; urgency=medium
* New upstream release.
diff --git a/doc/examples/cloud-config-run-cmds.txt b/doc/examples/cloud-config-run-cmds.txt
index 3bb06864..002398f5 100644
--- a/doc/examples/cloud-config-run-cmds.txt
+++ b/doc/examples/cloud-config-run-cmds.txt
@@ -18,5 +18,8 @@ runcmd:
- [ sh, -xc, "echo $(date) ': hello world!'" ]
- [ sh, -c, echo "=========hello world'=========" ]
- ls -l /root
- - [ wget, "http://slashdot.org", -O, /tmp/index.html ]
+ # Note: Don't write files to /tmp from cloud-init; use /run/somedir instead.
+ # Early boot environments can race systemd-tmpfiles-clean LP: #1707222.
+ - mkdir /run/mydir
+ - [ wget, "http://slashdot.org", -O, /run/mydir/index.html ]
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index bd84c641..774f66b9 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -127,7 +127,10 @@ runcmd:
- [ sh, -xc, "echo $(date) ': hello world!'" ]
- [ sh, -c, echo "=========hello world'=========" ]
- ls -l /root
- - [ wget, "http://slashdot.org", -O, /tmp/index.html ]
+ # Note: Don't write files to /tmp from cloud-init; use /run/somedir instead.
+ # Early boot environments can race systemd-tmpfiles-clean LP: #1707222.
+ - mkdir /run/mydir
+ - [ wget, "http://slashdot.org", -O, /run/mydir/index.html ]
# boot commands
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
index e25289ad..1b0ff366 100644
--- a/doc/rtd/topics/format.rst
+++ b/doc/rtd/topics/format.rst
@@ -121,7 +121,7 @@ Cloud Boothook
This content is ``boothook`` data. It is stored in a file under ``/var/lib/cloud`` and then executed immediately.
This is the earliest ``hook`` available. Note, that there is no mechanism provided for running only once. The boothook must take care of this itself.
-It is provided with the instance id in the environment variable ``INSTANCE_I``. This could be made use of to provide a 'once-per-instance' type of functionality.
+It is provided with the instance id in the environment variable ``INSTANCE_ID``. This could be made use of to provide a 'once-per-instance' type of functionality.
Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when using a MIME archive.
diff --git a/integration-requirements.txt b/integration-requirements.txt
index e5bb5b28..01baebdd 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -17,4 +17,4 @@ git+https://github.com/lxc/pylxd.git@4b8ab1802f9aee4eb29cf7b119dae0aa47150779
# finds latest image information
-bzr+lp:simplestreams
+git+https://git.launchpad.net/simplestreams
diff --git a/tests/cloud_tests/testcases/modules/salt_minion.py b/tests/cloud_tests/testcases/modules/salt_minion.py
deleted file mode 100644
index fc9688ed..00000000
--- a/tests/cloud_tests/testcases/modules/salt_minion.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class Test(base.CloudTestCase):
- """Test salt minion module."""
-
- def test_minon_master(self):
- """Test master value in config."""
- out = self.get_data_file('minion')
- self.assertIn('master: salt.mydomain.com', out)
-
- def test_minion_pem(self):
- """Test private key."""
- out = self.get_data_file('minion.pem')
- self.assertIn('------BEGIN PRIVATE KEY------', out)
- self.assertIn('<key data>', out)
- self.assertIn('------END PRIVATE KEY-------', out)
-
- def test_minion_pub(self):
- """Test public key."""
- out = self.get_data_file('minion.pub')
- self.assertIn('------BEGIN PUBLIC KEY-------', out)
- self.assertIn('<key data>', out)
- self.assertIn('------END PUBLIC KEY-------', out)
-
- def test_grains(self):
- """Test master value in config."""
- out = self.get_data_file('grains')
- self.assertIn('role: web', out)
-
- def test_minion_installed(self):
- """Test if the salt-minion package is installed"""
- self.assertPackageInstalled('salt-minion')
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/salt_minion.yaml b/tests/cloud_tests/testcases/modules/salt_minion.yaml
deleted file mode 100644
index 9227147c..00000000
--- a/tests/cloud_tests/testcases/modules/salt_minion.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-#
-# Create config for a salt minion
-#
-# 2016-11-17: Currently takes >60 seconds results in test failure
-#
-enabled: True
-cloud_config: |
- #cloud-config
- salt_minion:
- conf:
- master: salt.mydomain.com
- public_key: |
- ------BEGIN PUBLIC KEY-------
- <key data>
- ------END PUBLIC KEY-------
- private_key: |
- ------BEGIN PRIVATE KEY------
- <key data>
- ------END PRIVATE KEY-------
- grains:
- role: web
-collect_scripts:
- minion: |
- #!/bin/bash
- cat /etc/salt/minion
- minion_id: |
- #!/bin/bash
- cat /etc/salt/minion_id
- minion.pem: |
- #!/bin/bash
- PRIV_KEYFILE=/etc/salt/pki/minion/minion.pem
- if [ ! -f $PRIV_KEYFILE ]; then
- # Bionic and later automatically moves /etc/salt/pki/minion/*
- PRIV_KEYFILE=/var/lib/salt/pki/minion/minion.pem
- fi
- cat $PRIV_KEYFILE
- minion.pub: |
- #!/bin/bash
- PUB_KEYFILE=/etc/salt/pki/minion/minion.pub
- if [ ! -f $PUB_KEYFILE ]; then
- # Bionic and later automatically moves /etc/salt/pki/minion/*
- PUB_KEYFILE=/var/lib/salt/pki/minion/minion.pub
- fi
- cat $PUB_KEYFILE
- grains: |
- #!/bin/bash
- cat /etc/salt/grains
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index af9d3e1a..26b2b93d 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -85,7 +85,9 @@ class TestFindEndpoint(CiTestCase):
self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}}
self.assertEqual('5.4.3.2', wa_shim.find_endpoint(None))
- def test_latest_lease_used(self):
+ @mock.patch('cloudinit.sources.helpers.azure.util.is_FreeBSD')
+ def test_latest_lease_used(self, m_is_freebsd):
+ m_is_freebsd.return_value = False # To avoid hitting load_file
encoded_addresses = ['5:4:3:2', '4:3:2:1']
file_content = '\n'.join([self._build_lease_content(encoded_address)
for encoded_address in encoded_addresses])
diff --git a/tools/run-container b/tools/run-container
index 499e85b0..6dedb757 100755
--- a/tools/run-container
+++ b/tools/run-container
@@ -418,7 +418,7 @@ main() {
{ bad_Usage; return; }
local cur="" next=""
- local package="" source_package="" unittest="" name=""
+ local package=false srcpackage=false unittest="" name=""
local dirty=false pyexe="auto" artifact_d="."
while [ $# -ne 0 ]; do
@@ -430,8 +430,8 @@ main() {
-k|--keep) KEEP=true;;
-n|--name) name="$next"; shift;;
--pyexe) pyexe=$next; shift;;
- -p|--package) package=1;;
- -s|--source-package) source_package=1;;
+ -p|--package) package=true;;
+ -s|--source-package) srcpackage=true;;
-u|--unittest) unittest=1;;
-v|--verbose) VERBOSITY=$((VERBOSITY+1));;
--) shift; break;;
@@ -529,8 +529,8 @@ main() {
build_srcpkg="./packages/brpm $distflag --srpm"
pkg_ext=".rpm";;
esac
- if [ -n "$source_package" ]; then
- [ -n "$build_pkg" ] || {
+ if [ "$srcpackage" = "true" ]; then
+ [ -n "$build_srcpkg" ] || {
error "Unknown package command for $OS_NAME"
return 1
}
@@ -542,19 +542,21 @@ main() {
}
fi
- if [ -n "$package" ]; then
- [ -n "$build_srcpkg" ] || {
+ if [ "$package" = "true" ]; then
+ [ -n "$build_pkg" ] || {
error "Unknown build source command for $OS_NAME"
return 1
}
debug 1 "building binary package with $build_pkg."
+ # shellcheck disable=SC2086
inside_as_cd "$name" "$user" "$cdir" $pyexe $build_pkg || {
errorrc "failed: $build_pkg";
errors[${#errors[@]}]="binary package"
}
fi
- if [ -n "$artifact_d" ]; then
+ if [ -n "$artifact_d" ] &&
+ [ "$package" = "true" -o "$srcpackage" = "true" ]; then
local art=""
artifact_d="${artifact_d%/}/"
[ -d "${artifact_d}" ] || mkdir -p "$artifact_d" || {