summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.travis.yml82
-rw-r--r--HACKING.rst270
-rw-r--r--README.md2
-rw-r--r--cloudinit/analyze/dump.py3
-rw-r--r--cloudinit/analyze/show.py5
-rw-r--r--cloudinit/analyze/tests/test_boot.py16
-rw-r--r--cloudinit/analyze/tests/test_dump.py3
-rw-r--r--cloudinit/atomic_helper.py4
-rw-r--r--cloudinit/cmd/clean.py5
-rw-r--r--cloudinit/cmd/devel/logs.py4
-rwxr-xr-xcloudinit/cmd/devel/render.py5
-rw-r--r--cloudinit/cmd/devel/tests/test_logs.py3
-rw-r--r--cloudinit/cmd/query.py5
-rw-r--r--cloudinit/config/cc_apt_configure.py21
-rw-r--r--cloudinit/config/cc_bootcmd.py3
-rwxr-xr-xcloudinit/config/cc_byobu.py3
-rw-r--r--cloudinit/config/cc_ca_certs.py5
-rw-r--r--cloudinit/config/cc_chef.py403
-rw-r--r--cloudinit/config/cc_disable_ec2_metadata.py7
-rw-r--r--cloudinit/config/cc_disk_setup.py45
-rw-r--r--cloudinit/config/cc_emit_upstart.py8
-rw-r--r--cloudinit/config/cc_fan.py7
-rw-r--r--cloudinit/config/cc_final_message.py2
-rw-r--r--cloudinit/config/cc_growpart.py25
-rw-r--r--cloudinit/config/cc_grub_dpkg.py9
-rw-r--r--cloudinit/config/cc_keys_to_console.py3
-rw-r--r--cloudinit/config/cc_landscape.py3
-rw-r--r--cloudinit/config/cc_lxd.py23
-rw-r--r--cloudinit/config/cc_mcollective.py3
-rw-r--r--cloudinit/config/cc_mounts.py36
-rw-r--r--cloudinit/config/cc_ntp.py9
-rw-r--r--cloudinit/config/cc_package_update_upgrade_install.py3
-rw-r--r--cloudinit/config/cc_power_state_change.py3
-rw-r--r--cloudinit/config/cc_puppet.py9
-rw-r--r--cloudinit/config/cc_resizefs.py9
-rw-r--r--cloudinit/config/cc_rh_subscription.py19
-rw-r--r--cloudinit/config/cc_rsyslog.py5
-rw-r--r--cloudinit/config/cc_salt_minion.py4
-rw-r--r--cloudinit/config/cc_scripts_per_boot.py4
-rw-r--r--cloudinit/config/cc_scripts_per_instance.py4
-rw-r--r--cloudinit/config/cc_scripts_per_once.py4
-rw-r--r--cloudinit/config/cc_scripts_user.py4
-rw-r--r--cloudinit/config/cc_scripts_vendor.py3
-rw-r--r--cloudinit/config/cc_seed_random.py5
-rwxr-xr-xcloudinit/config/cc_set_passwords.py5
-rw-r--r--cloudinit/config/cc_snap.py7
-rw-r--r--cloudinit/config/cc_spacewalk.py8
-rwxr-xr-xcloudinit/config/cc_ssh.py7
-rwxr-xr-xcloudinit/config/cc_ssh_import_id.py5
-rw-r--r--cloudinit/config/cc_ubuntu_advantage.py11
-rw-r--r--cloudinit/config/cc_ubuntu_drivers.py9
-rw-r--r--cloudinit/config/tests/test_disable_ec2_metadata.py12
-rw-r--r--cloudinit/config/tests/test_final_message.py46
-rw-r--r--cloudinit/config/tests/test_grub_dpkg.py6
-rw-r--r--cloudinit/config/tests/test_mounts.py4
-rw-r--r--cloudinit/config/tests/test_set_passwords.py22
-rw-r--r--cloudinit/config/tests/test_snap.py12
-rw-r--r--cloudinit/config/tests/test_ubuntu_advantage.py28
-rw-r--r--cloudinit/config/tests/test_ubuntu_drivers.py26
-rw-r--r--cloudinit/conftest.py72
-rwxr-xr-xcloudinit/distros/__init__.py38
-rw-r--r--cloudinit/distros/arch.py17
-rw-r--r--cloudinit/distros/bsd.py9
-rw-r--r--cloudinit/distros/debian.py9
-rw-r--r--cloudinit/distros/freebsd.py13
-rw-r--r--cloudinit/distros/gentoo.py17
-rw-r--r--cloudinit/distros/netbsd.py21
-rw-r--r--cloudinit/distros/networking.py140
-rw-r--r--cloudinit/distros/openbsd.py10
-rw-r--r--cloudinit/distros/opensuse.py7
-rw-r--r--cloudinit/distros/rhel.py9
-rw-r--r--cloudinit/distros/tests/test_init.py35
-rw-r--r--cloudinit/distros/tests/test_networking.py42
-rw-r--r--cloudinit/features.py44
-rw-r--r--cloudinit/gpg.py14
-rw-r--r--cloudinit/handlers/boot_hook.py5
-rw-r--r--cloudinit/handlers/upstart_job.py11
-rw-r--r--cloudinit/net/__init__.py68
-rw-r--r--cloudinit/net/bsd.py16
-rw-r--r--cloudinit/net/dhcp.py38
-rw-r--r--cloudinit/net/eni.py9
-rw-r--r--cloudinit/net/freebsd.py7
-rw-r--r--cloudinit/net/netbsd.py10
-rw-r--r--cloudinit/net/netplan.py17
-rw-r--r--cloudinit/net/network_state.py9
-rw-r--r--cloudinit/net/openbsd.py12
-rw-r--r--cloudinit/net/sysconfig.py21
-rw-r--r--cloudinit/net/tests/test_dhcp.py50
-rw-r--r--cloudinit/net/tests/test_init.py37
-rw-r--r--cloudinit/netinfo.py27
-rwxr-xr-xcloudinit/reporting/handlers.py26
-rw-r--r--cloudinit/sources/DataSourceAltCloud.py8
-rwxr-xr-xcloudinit/sources/DataSourceAzure.py108
-rw-r--r--cloudinit/sources/DataSourceCloudStack.py3
-rw-r--r--cloudinit/sources/DataSourceConfigDrive.py3
-rw-r--r--cloudinit/sources/DataSourceDigitalOcean.py2
-rw-r--r--cloudinit/sources/DataSourceHetzner.py9
-rw-r--r--cloudinit/sources/DataSourceIBMCloud.py3
-rw-r--r--cloudinit/sources/DataSourceOVF.py7
-rw-r--r--cloudinit/sources/DataSourceOpenNebula.py35
-rw-r--r--cloudinit/sources/DataSourceRbxCloud.py19
-rw-r--r--cloudinit/sources/DataSourceSmartOS.py5
-rwxr-xr-xcloudinit/sources/helpers/azure.py31
-rw-r--r--cloudinit/sources/helpers/digitalocean.py21
-rw-r--r--cloudinit/sources/helpers/hetzner.py19
-rw-r--r--cloudinit/sources/helpers/openstack.py8
-rw-r--r--cloudinit/sources/helpers/tests/test_netlink.py165
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_custom_script.py3
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_nic.py7
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_passwd.py7
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_util.py39
-rw-r--r--cloudinit/sources/tests/test_init.py9
-rw-r--r--cloudinit/ssh_util.py2
-rw-r--r--cloudinit/subp.py333
-rw-r--r--cloudinit/tests/helpers.py13
-rw-r--r--cloudinit/tests/test_conftest.py38
-rw-r--r--cloudinit/tests/test_features.py60
-rw-r--r--cloudinit/tests/test_gpg.py10
-rw-r--r--cloudinit/tests/test_netinfo.py40
-rw-r--r--cloudinit/tests/test_subp.py227
-rw-r--r--cloudinit/tests/test_util.py78
-rw-r--r--cloudinit/user_data.py31
-rw-r--r--cloudinit/util.py428
-rw-r--r--config/cloud.cfg.d/05_logging.cfg2
-rw-r--r--config/cloud.cfg.tmpl2
-rw-r--r--conftest.py95
-rw-r--r--debian/changelog72
-rw-r--r--doc/rtd/topics/cli.rst16
-rw-r--r--doc/rtd/topics/datasources/cloudstack.rst16
-rw-r--r--doc/rtd/topics/datasources/maas.rst2
-rw-r--r--doc/rtd/topics/datasources/nocloud.rst6
-rw-r--r--doc/rtd/topics/datasources/openstack.rst2
-rwxr-xr-xpackages/bddeb11
-rwxr-xr-xpackages/brpm5
-rwxr-xr-xsystemd/cloud-init-generator.tmpl2
-rw-r--r--systemd/cloud-init.service.tmpl2
-rw-r--r--templates/hosts.freebsd.tmpl9
-rw-r--r--tests/cloud_tests/bddeb.py8
-rw-r--r--tests/cloud_tests/platforms/lxd/image.py38
-rw-r--r--tests/cloud_tests/platforms/lxd/instance.py3
-rw-r--r--tests/cloud_tests/platforms/nocloudkvm/image.py12
-rw-r--r--tests/cloud_tests/platforms/nocloudkvm/instance.py10
-rw-r--r--tests/cloud_tests/platforms/nocloudkvm/platform.py5
-rw-r--r--tests/cloud_tests/platforms/platforms.py9
-rw-r--r--tests/cloud_tests/testcases/base.py2
-rw-r--r--tests/cloud_tests/util.py7
-rw-r--r--tests/unittests/test_builtin_handlers.py3
-rw-r--r--tests/unittests/test_data.py25
-rw-r--r--tests/unittests/test_datasource/test_aliyun.py2
-rw-r--r--tests/unittests/test_datasource/test_altcloud.py7
-rw-r--r--tests/unittests/test_datasource/test_azure.py76
-rw-r--r--tests/unittests/test_datasource/test_azure_helper.py2
-rw-r--r--tests/unittests/test_datasource/test_cloudstack.py2
-rw-r--r--tests/unittests/test_datasource/test_ec2.py17
-rw-r--r--tests/unittests/test_datasource/test_hetzner.py23
-rw-r--r--tests/unittests/test_datasource/test_opennebula.py156
-rw-r--r--tests/unittests/test_datasource/test_openstack.py2
-rw-r--r--tests/unittests/test_datasource/test_ovf.py9
-rw-r--r--tests/unittests/test_datasource/test_rbx.py10
-rw-r--r--tests/unittests/test_datasource/test_scaleway.py81
-rw-r--r--tests/unittests/test_datasource/test_smartos.py6
-rw-r--r--tests/unittests/test_distros/test_bsd_utils.py5
-rw-r--r--tests/unittests/test_distros/test_create_users.py8
-rw-r--r--tests/unittests/test_distros/test_debian.py2
-rw-r--r--tests/unittests/test_distros/test_freebsd.py4
-rw-r--r--tests/unittests/test_distros/test_generic.py190
-rw-r--r--tests/unittests/test_distros/test_netconfig.py7
-rw-r--r--tests/unittests/test_distros/test_user_data_normalize.py6
-rw-r--r--tests/unittests/test_ds_identify.py25
-rw-r--r--tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py11
-rw-r--r--tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py5
-rw-r--r--tests/unittests/test_handler/test_handler_apt_source_v1.py20
-rw-r--r--tests/unittests/test_handler/test_handler_apt_source_v3.py48
-rw-r--r--tests/unittests/test_handler/test_handler_bootcmd.py6
-rw-r--r--tests/unittests/test_handler/test_handler_ca_certs.py5
-rw-r--r--tests/unittests/test_handler/test_handler_chef.py6
-rw-r--r--tests/unittests/test_handler/test_handler_disk_setup.py8
-rw-r--r--tests/unittests/test_handler/test_handler_growpart.py10
-rw-r--r--tests/unittests/test_handler/test_handler_landscape.py6
-rw-r--r--tests/unittests/test_handler/test_handler_locale.py2
-rw-r--r--tests/unittests/test_handler/test_handler_lxd.py28
-rw-r--r--tests/unittests/test_handler/test_handler_mcollective.py7
-rw-r--r--tests/unittests/test_handler/test_handler_mounts.py25
-rw-r--r--tests/unittests/test_handler/test_handler_ntp.py51
-rw-r--r--tests/unittests/test_handler/test_handler_puppet.py49
-rw-r--r--tests/unittests/test_handler/test_handler_runcmd.py4
-rw-r--r--tests/unittests/test_handler/test_handler_seed_random.py5
-rw-r--r--tests/unittests/test_handler/test_handler_spacewalk.py20
-rw-r--r--tests/unittests/test_handler/test_schema.py3
-rw-r--r--tests/unittests/test_net.py61
-rw-r--r--tests/unittests/test_net_freebsd.py2
-rw-r--r--tests/unittests/test_render_cloudcfg.py8
-rw-r--r--tests/unittests/test_reporting_hyperv.py13
-rw-r--r--tests/unittests/test_rh_subscription.py18
-rw-r--r--tests/unittests/test_sshutil.py18
-rw-r--r--tests/unittests/test_util.py281
-rw-r--r--tests/unittests/test_vmware/test_guestcust_util.py22
-rw-r--r--tools/.github-cla-signers8
-rwxr-xr-xtools/read-dependencies8
-rwxr-xr-xtools/run-container5
-rw-r--r--tox.ini15
201 files changed, 3640 insertions, 2079 deletions
diff --git a/.travis.yml b/.travis.yml
index 84f38533..9844c065 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,18 +3,25 @@ dist: bionic
# We use two different caching strategies. The default is to cache pip
# packages (as most of our jobs use pip packages), which is configured here.
-# For the integration tests, we instead want to cache the lxd images. The
-# directory in which the images are stored (/var/snap/lxd/common/lxd/images/)
-# is not readable/writeable by the default user (which is a requirement for
-# caching), so we instead cache the `lxd_images/` directory. We move lxd
-# images out of there before we run tests and back in once tests are complete.
-# We _move_ the images out and only copy the most recent lxd image back into
-# the cache, to avoid our cache growing without bound. (We only need the most
-# recent lxd image because the integration tests only use a single image.)
+# For the integration tests, we instead want to cache the lxd images and
+# package build schroot.
#
# We cache the lxd images because this saves a few seconds in the general
# case, but provides substantial speed-ups when cloud-images.ubuntu.com, the
-# source of the images, is under heavy load.
+# source of the images, is under heavy load. The directory in which the lxd
+# images are stored (/var/snap/lxd/common/lxd/images/) is not
+# readable/writeable by the default user (which is a requirement for caching),
+# so we instead cache the `lxd_images/` directory. We move lxd images out of
+# there before we run tests and back in once tests are complete. We _move_ the
+# images out and only copy the most recent lxd image back into the cache, to
+# avoid our cache growing without bound. (We only need the most recent lxd
+# image because the integration tests only use a single image.)
+#
+# We cache the package build schroot because it saves 2-3 minutes per build.
+# Without caching, we have to perform a debootstrap for every build. We update
+# the schroot before storing it back in the cache, to ensure that we aren't
+# just using an increasingly-old schroot as time passes. The cached schroot is
+# stored as a tarball, to preserve permissions/ownership.
cache: pip
install:
@@ -37,6 +44,7 @@ matrix:
cache:
- directories:
- lxd_images
+ - chroots
before_cache:
- |
# Find the most recent image file
@@ -44,13 +52,10 @@ matrix:
# This might be <hash>.rootfs or <hash>, normalise
latest_file="$(basename $latest_file .rootfs)"
# Find all files with that prefix and copy them to our cache dir
- sudo find /var/snap/lxd/common/lxd/images/ -name $latest_file* -print -exec cp {} lxd_images/ \;
+ sudo find /var/snap/lxd/common/lxd/images/ -name $latest_file* -print -exec cp {} "$TRAVIS_BUILD_DIR/lxd_images/" \;
install:
- git fetch --unshallow
- - sudo apt-get build-dep -y cloud-init
- - sudo apt-get install -y --install-recommends sbuild ubuntu-dev-tools fakeroot tox
- # These are build deps but not pulled in by the build-dep call above
- - sudo apt-get install -y --install-recommends dh-systemd python3-coverage python3-pytest python3-pytest-cov
+ - sudo apt-get install -y --install-recommends sbuild ubuntu-dev-tools fakeroot tox debhelper
- pip install .
- pip install tox
# bionic has lxd from deb installed, remove it first to ensure
@@ -61,15 +66,56 @@ matrix:
- sudo lxd init --auto
- sudo mkdir --mode=1777 -p /var/snap/lxd/common/consoles
# Move any cached lxd images into lxd's image dir
- - sudo find lxd_images/ -type f -print -exec mv {} /var/snap/lxd/common/lxd/images/ \;
+ - sudo find "$TRAVIS_BUILD_DIR/lxd_images/" -type f -print -exec mv {} /var/snap/lxd/common/lxd/images/ \;
- sudo usermod -a -G lxd $USER
- sudo sbuild-adduser $USER
- cp /usr/share/doc/sbuild/examples/example.sbuildrc /home/$USER/.sbuildrc
script:
# Ubuntu LTS: Build
- - ./packages/bddeb -S
- # Use this to get a new shell where we're in the sbuild group
- - sudo -E su $USER -c 'mk-sbuild xenial'
+ - ./packages/bddeb -S -d --release xenial
+ - |
+ needs_caching=false
+ if [ -e "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" ]; then
+ # If we have a cached chroot, move it into place
+ sudo mkdir -p /var/lib/schroot/chroots/xenial-amd64
+ sudo tar --sparse --xattrs --preserve-permissions --numeric-owner -xf "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" -C /var/lib/schroot/chroots/xenial-amd64
+ # Write its configuration
+ cat > sbuild-xenial-amd64 << EOM
+ [xenial-amd64]
+ description=xenial-amd64
+ groups=sbuild,root,admin
+ root-groups=sbuild,root,admin
+ # Uncomment these lines to allow members of these groups to access
+ # the -source chroots directly (useful for automated updates, etc).
+ #source-root-users=sbuild,root,admin
+ #source-root-groups=sbuild,root,admin
+ type=directory
+ profile=sbuild
+ union-type=overlay
+ directory=/var/lib/schroot/chroots/xenial-amd64
+ EOM
+ sudo mv sbuild-xenial-amd64 /etc/schroot/chroot.d/
+ sudo chown root /etc/schroot/chroot.d/sbuild-xenial-amd64
+ # And ensure it's up-to-date.
+ before_pkgs="$(sudo schroot -c source:xenial-amd64 -d / dpkg -l | sha256sum)"
+ sudo schroot -c source:xenial-amd64 -d / -- sh -c "apt-get update && apt-get -qqy upgrade"
+ after_pkgs=$(sudo schroot -c source:xenial-amd64 -d / dpkg -l | sha256sum)
+ if [ "$before_pkgs" != "$after_pkgs" ]; then
+ needs_caching=true
+ fi
+ else
+ # Otherwise, create the chroot
+ sudo -E su $USER -c 'mk-sbuild xenial'
+ needs_caching=true
+ fi
+ # If there are changes to the schroot (or it's entirely new),
+ # tar up the schroot (to preserve ownership/permissions) and
+ # move it into the cached dir; no need to compress it because
+ # Travis will do that anyway
+ if [ "$needs_caching" = "true" ]; then
+ sudo tar --sparse --xattrs --xattrs-include=* -cf "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" -C /var/lib/schroot/chroots/xenial-amd64 .
+ fi
+ # Use sudo to get a new shell where we're in the sbuild group
- sudo -E su $USER -c 'sbuild --nolog --no-run-lintian --verbose --dist=xenial cloud-init_*.dsc'
# Ubuntu LTS: Integration
- sg lxd -c 'tox -e citest -- run --verbose --preserve-data --data-dir results --os-name xenial --test modules/apt_configure_sources_list.yaml --test modules/ntp_servers --test modules/set_password_list --test modules/user_groups --deb cloud-init_*_all.deb'
diff --git a/HACKING.rst b/HACKING.rst
index d026cf71..27a38bc3 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -218,15 +218,24 @@ The following guidelines should be followed:
[#fixture-list]_:
* ``cache``
- * ``capsys``
* ``capfd``
- * ``record_xml_property``
+ * ``caplog`` (provided by ``python3-pytest-catchlog`` on xenial)
+ * ``capsys``
* ``monkeypatch``
* ``pytestconfig``
+ * ``record_xml_property``
* ``recwarn``
* ``tmpdir_factory``
* ``tmpdir``
+ * On xenial, the objects returned by the ``tmpdir`` fixture cannot be
+ used where paths are required; they are rejected as invalid paths.
+ You must instead use their ``.strpath`` attribute.
+
+ * For example, instead of
+ ``util.write_file(tmpdir.join("some_file"), ...)``, you should
+ write ``util.write_file(tmpdir.join("some_file").strpath, ...)``.
+
* Variables/parameter names for ``Mock`` or ``MagicMock`` instances
should start with ``m_`` to clearly distinguish them from non-mock
variables
@@ -320,9 +329,16 @@ variable annotations specified in `PEP-526`_ were introduced in Python
.. [#fixture-list] This list of fixtures (with markup) can be
reproduced by running::
- py.test-3 --fixtures -q | grep "^[^ ]" | grep -v no | sed 's/.*/* ``\0``/'
+ py.test-3 --fixtures -q | grep "^[^ -]" | grep -v '\(no\|capturelog\)' | sort | sed 's/.*/* ``\0``/'
+
+ in a xenial lxd container with python3-pytest-catchlog installed.
+
+Feature Flags
+-------------
+
+.. automodule:: cloudinit.features
+ :members:
- in a xenial lxd container with python3-pytest installed.
Ongoing Refactors
=================
@@ -332,7 +348,7 @@ intended as documentation for developers involved in the refactoring,
but also for other developers who may interact with the code being
refactored in the meantime.
-``cloudinit.net`` -> ``cloudinit.distros.Networking`` Hierarchy
+``cloudinit.net`` -> ``cloudinit.distros.networking`` Hierarchy
---------------------------------------------------------------
``cloudinit.net`` was imported from the curtin codebase as a chunk, and
@@ -352,12 +368,13 @@ there may already be differences in tooling that we currently
work around in less obvious ways.
The high-level plan is to introduce a hierarchy of networking classes
-in ``cloudinit.distros``, which each ``Distro`` subclass will
-reference. These will capture the differences between networking on
-our various distros, while still allowing easy reuse of code between
+in ``cloudinit.distros.networking``, which each ``Distro`` subclass
+will reference. These will capture the differences between networking
+on our various distros, while still allowing easy reuse of code between
distros that share functionality (e.g. most of the Linux networking
-behaviour). Callers will call ``distro.net.func`` instead of
-``cloudinit.net.func``, which will necessitate access to an
+behaviour). ``Distro`` objects will instantiate the networking classes
+at ``self.net``, so callers will call ``distro.net.<func>`` instead of
+``cloudinit.net.<func>``; this will necessitate access to an
instantiated ``Distro`` object.
An implementation note: there may be external consumers of the
@@ -371,9 +388,9 @@ existing API exactly.)
In more detail:
* The root of this hierarchy will be the
- ``cloudinit.distros.Networking`` class. This class will have
- a corresponding method for every ``cloudinit.net`` function that we
- identify to be involved in refactoring. Initially, these methods'
+ ``cloudinit.distros.networking.Networking`` class. This class will
+ have a corresponding method for every ``cloudinit.net`` function that
+ we identify to be involved in refactoring. Initially, these methods'
implementations will simply call the corresponding ``cloudinit.net``
function. (This gives us the complete API from day one, for existing
consumers.)
@@ -398,31 +415,226 @@ In more detail:
its ``net`` attribute. (This is the entry point for existing
consumers to migrate to.)
* Callers of refactored functions will change from calling
- ``cloudinit.net.some_func`` to ``distro.net.some_func``, where
- ``distro`` is an instance of the appropriate ``Distro`` class for
- this system. (This will require making such an instance available to
- callers, which will constitute a large part of the work in this
- project.)
+ ``cloudinit.net.<func>`` to ``distro.net.<func>``, where ``distro``
+ is an instance of the appropriate ``Distro`` class for this system.
+ (This will require making such an instance available to callers,
+ which will constitute a large part of the work in this project.)
After the initial structure is in place, the work in this refactor will
consist of replacing the ``cloudinit.net.some_func`` call in each
-``cloudinit.distros.Networking`` method with the actual implementation.
-This can be done incrementally, one function at a time:
-
-* pick an unmigrated ``cloudinit.distros.Networking`` method
-* refactor all of its callers to call the ``distro.net`` method on
- ``Distro`` instead of the ``cloudinit.net`` function. (This is likely
- to be the most time-consuming step, as it may require plumbing
- ``Distro`` objects through to places that previously have not
- consumed them.)
+``cloudinit.distros.networking.Networking`` method with the actual
+implementation. This can be done incrementally, one function at a
+time:
+
+* pick an unmigrated ``cloudinit.distros.networking.Networking`` method
+* find it in the `the list of bugs tagged net-refactor`_ and assign
+ yourself to it (see :ref:`Managing Work/Tracking Progress` below for
+ more details)
+* refactor all of its callers to call the ``distro.net.<func>`` method
+ on ``Distro`` instead of the ``cloudinit.net.<func>`` function. (This
+ is likely to be the most time-consuming step, as it may require
+ plumbing ``Distro`` objects through to places that previously have
+ not consumed them.)
* refactor its implementation from ``cloudinit.net`` into the
``Networking`` hierarchy (e.g. if it has an if/else on BSD, this is
the time to put the implementations in their respective subclasses)
+
+ * if part of the method contains distro-independent logic, then you
+ may need to create new methods to capture this distro-specific
+ logic; we don't want to replicate common logic in different
+ ``Networking`` subclasses
+ * if after the refactor, the method on the root ``Networking`` class
+ no longer has any implementation, it should be converted to an
+ `abstractmethod`_
+
* ensure that the new implementation has unit tests (either by moving
existing tests, or by writing new ones)
+* ensure that the new implementation has a docstring
+* add any appropriate type annotations
+
+ * note that we must follow the constraints described in the "Type
+ Annotations" section above, so you may not be able to write
+ complete annotations
+ * we have `type aliases`_ defined in ``cloudinit.distros.networking``
+ which should be used when applicable
+
* finally, remove it (and any other now-unused functions) from
cloudinit.net (to avoid having two parallel implementations)
+``cloudinit.net`` Functions/Classes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The functions/classes that need refactoring break down into some broad
+categories:
+
+* helpers for accessing ``/sys`` (that should not be on the top-level
+ ``Networking`` class as they are Linux-specific):
+
+ * ``get_sys_class_path``
+ * ``sys_dev_path``
+ * ``read_sys_net``
+ * ``read_sys_net_safe``
+ * ``read_sys_net_int``
+
+* those that directly access ``/sys`` (via helpers) and should (IMO) be
+ included in the API of the ``Networking`` class:
+
+ * ``generate_fallback_config``
+
+ * the ``config_driver`` parameter is used and passed as a boolean,
+ so we can change the default value to ``False`` (instead of
+ ``None``)
+
+ * ``get_ib_interface_hwaddr``
+ * ``get_interface_mac``
+ * ``interface_has_own_mac``
+ * ``is_bond``
+ * ``is_bridge``
+ * ``is_physical``
+ * ``is_renamed``
+ * ``is_up``
+ * ``is_vlan``
+ * ``wait_for_physdevs``
+
+* those that directly access ``/sys`` (via helpers) but may be
+ Linux-specific concepts or names:
+
+ * ``get_master``
+ * ``device_devid``
+ * ``device_driver``
+
+* those that directly use ``ip``:
+
+ * ``_get_current_rename_info``
+
+ * this has non-distro-specific logic so should potentially be
+ refactored to use helpers on ``self`` instead of ``ip`` directly
+ (rather than being wholesale reimplemented in each of
+ ``BSDNetworking`` or ``LinuxNetworking``)
+ * we can also remove the ``check_downable`` argument, it's never
+ specified so is always ``True``
+
+ * ``_rename_interfaces``
+
+ * this has several internal helper functions which use ``ip``
+ directly, and it calls ``_get_current_rename_info``. That said,
+ there appears to be a lot of non-distro-specific logic that could
+ live in a function on ``Networking``, so this will require some
+ careful refactoring to avoid duplicating that logic in each of
+ ``BSDNetworking`` and ``LinuxNetworking``.
+ * only the ``renames`` and ``current_info`` parameters are ever
+ passed in (and ``current_info`` only by tests), so we can remove
+ the others from the definition
+
+ * ``EphemeralIPv4Network``
+
+ * this is another case where it mixes distro-specific and
+ non-specific functionality. Specifically, ``__init__``,
+ ``__enter__`` and ``__exit__`` are non-specific, and the
+ remaining methods are distro-specific.
+ * when refactoring this, the need to track ``cleanup_cmds`` likely
+ means that the distro-specific behaviour cannot be captured only
+ in the ``Networking`` class. See `this comment in PR #363`_ for
+ more thoughts.
+
+* those that implicitly use ``/sys`` via their call dependencies:
+
+ * ``master_is_bridge_or_bond``
+
+ * appends to ``get_master`` return value, which is a ``/sys`` path
+
+ * ``extract_physdevs``
+
+ * calls ``device_driver`` and ``device_devid`` in both
+ ``_version_*`` impls
+
+ * ``apply_network_config_names``
+
+ * calls ``extract_physdevs``
+ * there is already a ``Distro.apply_network_config_names`` which in
+ the default implementation calls this function; this and its BSD
+ subclass implementations should be refactored at the same time
+ * the ``strict_present`` and ``strict_busy`` parameters are never
+ passed, nor are they used in the function definition, so they can
+ be removed
+
+ * ``get_interfaces``
+
+ * calls ``device_driver``, ``device_devid`` amongst others
+
+ * ``get_ib_hwaddrs_by_interface``
+
+ * calls ``get_interfaces``
+
+* those that may fall into the above categories, but whose use is only
+ related to netfailover (which relies on a Linux-specific network
+ driver, so is unlikely to be relevant elsewhere without a substantial
+ refactor; these probably only need implementing in
+ ``LinuxNetworking``):
+
+ * ``get_dev_features``
+
+ * ``has_netfail_standby_feature``
+
+ * calls ``get_dev_features``
+
+ * ``is_netfailover``
+ * ``is_netfail_master``
+
+ * this is called from ``generate_fallback_config``
+
+ * ``is_netfail_primary``
+ * ``is_netfail_standby``
+
+ * N.B. all of these take an optional ``driver`` argument which is
+ used to pass around a value to avoid having to look it up by
+ calling ``device_driver`` every time. This is something of a leaky
+ abstraction, and is better served by caching on ``device_driver``
+ or storing the cached value on ``self``, so we can drop the
+ parameter from the new API.
+
+* those that use ``/sys`` (via helpers) and have non-exhaustive BSD
+ logic:
+
+ * ``get_devicelist``
+
+* those that already have separate Linux/BSD implementations:
+
+ * ``find_fallback_nic``
+ * ``get_interfaces_by_mac``
+
+* those that have no OS-specific functionality (so do not need to be
+ refactored):
+
+ * ``ParserError``
+ * ``RendererNotFoundError``
+ * ``has_url_connectivity``
+ * ``is_ip_address``
+ * ``is_ipv4_address``
+ * ``natural_sort_key``
+
+Note that the functions in ``cloudinit.net`` use inconsistent parameter
+names for "string that contains a device name"; we can standardise on
+``devname`` (the most common one) in the refactor.
+
+Managing Work/Tracking Progress
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To ensure that we won't have multiple people working on the same part
+of the refactor at the same time, there is a bug for each function.
+You can see the current status by looking at `the list of bugs tagged
+net-refactor`_.
+
+When you're working on refactoring a particular method, ensure that you
+have assigned yourself to the corresponding bug, to avoid duplicate
+work.
+
+Generally, when considering what to pick up to refactor, it is best to
+start with functions in ``cloudinit.net`` which are not called by
+anything else in ``cloudinit.net``. This allows you to focus only on
+refactoring that function and its callsites, rather than having to
+update the other ``cloudinit.net`` function also.
+
References
~~~~~~~~~~
@@ -435,3 +647,7 @@ References
.. _Mina Galić's email the the cloud-init ML in 2018: https://lists.launchpad.net/cloud-init/msg00185.html
.. _Mina Galić's email to the cloud-init ML in 2019: https://lists.launchpad.net/cloud-init/msg00237.html
.. _PR #363: https://github.com/canonical/cloud-init/pull/363
+.. _this comment in PR #363: https://github.com/canonical/cloud-init/pull/363#issuecomment-628829489
+.. _abstractmethod: https://docs.python.org/3/library/abc.html#abc.abstractmethod
+.. _type aliases: https://docs.python.org/3/library/typing.html#type-aliases
+.. _the list of bugs tagged net-refactor: https://bugs.launchpad.net/cloud-init/+bugs?field.tag=net-refactor
diff --git a/README.md b/README.md
index 73bf1ddd..a3455135 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
# cloud-init
-[![Build Status](https://travis-ci.org/canonical/cloud-init.svg?branch=master)](https://travis-ci.org/canonical/cloud-init) [![Read the Docs](https://readthedocs.org/projects/cloudinit/badge/?version=latest&style=flat)](https://cloudinit.readthedocs.org)
+[![Build Status](https://travis-ci.com/canonical/cloud-init.svg?branch=master)](https://travis-ci.com/canonical/cloud-init) [![Read the Docs](https://readthedocs.org/projects/cloudinit/badge/?version=latest&style=flat)](https://cloudinit.readthedocs.org)
Cloud-init is the *industry standard* multi-distribution method for
cross-platform cloud instance initialization. It is supported across all
diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py
index 939c3126..62ad51fe 100644
--- a/cloudinit/analyze/dump.py
+++ b/cloudinit/analyze/dump.py
@@ -4,6 +4,7 @@ import calendar
from datetime import datetime
import sys
+from cloudinit import subp
from cloudinit import util
stage_to_description = {
@@ -51,7 +52,7 @@ def parse_timestamp(timestampstr):
def parse_timestamp_from_date(timestampstr):
- out, _ = util.subp(['date', '+%s.%3N', '-d', timestampstr])
+ out, _ = subp.subp(['date', '+%s.%3N', '-d', timestampstr])
timestamp = out.strip()
return float(timestamp)
diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py
index fb152b1d..cca1fa7f 100644
--- a/cloudinit/analyze/show.py
+++ b/cloudinit/analyze/show.py
@@ -11,6 +11,7 @@ import os
import time
import sys
+from cloudinit import subp
from cloudinit import util
from cloudinit.distros import uses_systemd
@@ -155,7 +156,7 @@ class SystemctlReader(object):
:return: whether the subp call failed or not
'''
try:
- value, err = util.subp(self.args, capture=True)
+ value, err = subp.subp(self.args, capture=True)
if err:
return err
self.epoch = value
@@ -215,7 +216,7 @@ def gather_timestamps_using_dmesg():
with gather_timestamps_using_systemd
'''
try:
- data, _ = util.subp(['dmesg'], capture=True)
+ data, _ = subp.subp(['dmesg'], capture=True)
split_entries = data[0].splitlines()
for i in split_entries:
if i.decode('UTF-8').find('user') != -1:
diff --git a/cloudinit/analyze/tests/test_boot.py b/cloudinit/analyze/tests/test_boot.py
index f4001c14..f69423c3 100644
--- a/cloudinit/analyze/tests/test_boot.py
+++ b/cloudinit/analyze/tests/test_boot.py
@@ -25,7 +25,7 @@ class TestDistroChecker(CiTestCase):
m_get_linux_distro, m_is_FreeBSD):
self.assertEqual(err_code, dist_check_timestamp())
- @mock.patch('cloudinit.util.subp', return_value=(0, 1))
+ @mock.patch('cloudinit.subp.subp', return_value=(0, 1))
def test_subp_fails(self, m_subp):
self.assertEqual(err_code, dist_check_timestamp())
@@ -42,7 +42,7 @@ class TestSystemCtlReader(CiTestCase):
with self.assertRaises(RuntimeError):
reader.parse_epoch_as_float()
- @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None))
def test_systemctl_works_correctly_threshold(self, m_subp):
reader = SystemctlReader('dummyProperty', 'dummyParameter')
self.assertEqual(1.0, reader.parse_epoch_as_float())
@@ -50,12 +50,12 @@ class TestSystemCtlReader(CiTestCase):
self.assertTrue(thresh < 1e-6)
self.assertTrue(thresh > (-1 * 1e-6))
- @mock.patch('cloudinit.util.subp', return_value=('U=0', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=0', None))
def test_systemctl_succeed_zero(self, m_subp):
reader = SystemctlReader('dummyProperty', 'dummyParameter')
self.assertEqual(0.0, reader.parse_epoch_as_float())
- @mock.patch('cloudinit.util.subp', return_value=('U=1', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=1', None))
def test_systemctl_succeed_distinct(self, m_subp):
reader = SystemctlReader('dummyProperty', 'dummyParameter')
val1 = reader.parse_epoch_as_float()
@@ -64,13 +64,13 @@ class TestSystemCtlReader(CiTestCase):
val2 = reader2.parse_epoch_as_float()
self.assertNotEqual(val1, val2)
- @mock.patch('cloudinit.util.subp', return_value=('100', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('100', None))
def test_systemctl_epoch_not_splittable(self, m_subp):
reader = SystemctlReader('dummyProperty', 'dummyParameter')
with self.assertRaises(IndexError):
reader.parse_epoch_as_float()
- @mock.patch('cloudinit.util.subp', return_value=('U=foobar', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=foobar', None))
def test_systemctl_cannot_convert_epoch_to_float(self, m_subp):
reader = SystemctlReader('dummyProperty', 'dummyParameter')
with self.assertRaises(ValueError):
@@ -130,7 +130,7 @@ class TestAnalyzeBoot(CiTestCase):
self.assertEqual(err_string, data)
@mock.patch("cloudinit.util.is_container", return_value=True)
- @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None))
def test_container_no_ci_log_line(self, m_is_container, m_subp):
path = os.path.dirname(os.path.abspath(__file__))
log_path = path + '/boot-test.log'
@@ -148,7 +148,7 @@ class TestAnalyzeBoot(CiTestCase):
self.assertEqual(FAIL_CODE, finish_code)
@mock.patch("cloudinit.util.is_container", return_value=True)
- @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None))
@mock.patch('cloudinit.analyze.__main__._get_events', return_value=[{
'name': 'init-local', 'description': 'starting search', 'timestamp':
100000}])
diff --git a/cloudinit/analyze/tests/test_dump.py b/cloudinit/analyze/tests/test_dump.py
index d6fbd381..dac1efb6 100644
--- a/cloudinit/analyze/tests/test_dump.py
+++ b/cloudinit/analyze/tests/test_dump.py
@@ -5,7 +5,8 @@ from textwrap import dedent
from cloudinit.analyze.dump import (
dump_events, parse_ci_logline, parse_timestamp)
-from cloudinit.util import which, write_file
+from cloudinit.util import write_file
+from cloudinit.subp import which
from cloudinit.tests.helpers import CiTestCase, mock, skipIf
diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py
index 1f61faa2..485ff92f 100644
--- a/cloudinit/atomic_helper.py
+++ b/cloudinit/atomic_helper.py
@@ -11,10 +11,10 @@ LOG = logging.getLogger(__name__)
def write_file(filename, content, mode=_DEF_PERMS,
- omode="wb", copy_mode=False):
+ omode="wb", preserve_mode=False):
# open filename in mode 'omode', write content, set permissions to 'mode'
- if copy_mode:
+ if preserve_mode:
try:
file_stat = os.stat(filename)
mode = stat.S_IMODE(file_stat.st_mode)
diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py
index 30e49de0..928a8eea 100644
--- a/cloudinit/cmd/clean.py
+++ b/cloudinit/cmd/clean.py
@@ -10,9 +10,8 @@ import os
import sys
from cloudinit.stages import Init
-from cloudinit.util import (
- ProcessExecutionError, del_dir, del_file, get_config_logfiles,
- is_link, subp)
+from cloudinit.subp import (ProcessExecutionError, subp)
+from cloudinit.util import (del_dir, del_file, get_config_logfiles, is_link)
def error(msg):
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
index 4c086b51..51c61cca 100644
--- a/cloudinit/cmd/devel/logs.py
+++ b/cloudinit/cmd/devel/logs.py
@@ -12,8 +12,8 @@ import sys
from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
from cloudinit.temp_utils import tempdir
-from cloudinit.util import (
- ProcessExecutionError, chdir, copy, ensure_dir, subp, write_file)
+from cloudinit.subp import (ProcessExecutionError, subp)
+from cloudinit.util import (chdir, copy, ensure_dir, write_file)
CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log']
diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py
index 1bc22406..1090aa16 100755
--- a/cloudinit/cmd/devel/render.py
+++ b/cloudinit/cmd/devel/render.py
@@ -57,8 +57,9 @@ def handle_args(name, args):
paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE)
if not os.path.exists(instance_data_fn):
LOG.warning(
- 'Missing root-readable %s. Using redacted %s instead.',
- instance_data_fn, redacted_data_fn)
+ 'Missing root-readable %s. Using redacted %s instead.',
+ instance_data_fn, redacted_data_fn
+ )
instance_data_fn = redacted_data_fn
else:
instance_data_fn = redacted_data_fn
diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py
index d2dfa8de..ddfd58e1 100644
--- a/cloudinit/cmd/devel/tests/test_logs.py
+++ b/cloudinit/cmd/devel/tests/test_logs.py
@@ -8,7 +8,8 @@ from cloudinit.cmd.devel import logs
from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
from cloudinit.tests.helpers import (
FilesystemMockingTestCase, mock, wrap_and_call)
-from cloudinit.util import ensure_dir, load_file, subp, write_file
+from cloudinit.subp import subp
+from cloudinit.util import ensure_dir, load_file, write_file
@mock.patch('cloudinit.cmd.devel.logs.os.getuid')
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
index e3db8679..0fb48ebd 100644
--- a/cloudinit/cmd/query.py
+++ b/cloudinit/cmd/query.py
@@ -90,8 +90,9 @@ def handle_args(name, args):
instance_data_fn = sensitive_data_fn
else:
LOG.warning(
- 'Missing root-readable %s. Using redacted %s instead.',
- sensitive_data_fn, redacted_data_fn)
+ 'Missing root-readable %s. Using redacted %s instead.',
+ sensitive_data_fn, redacted_data_fn
+ )
instance_data_fn = redacted_data_fn
else:
instance_data_fn = redacted_data_fn
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index b1c7b471..73d8719f 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -17,6 +17,7 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit import gpg
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import templater
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
@@ -431,7 +432,7 @@ def _should_configure_on_empty_apt():
# if no config was provided, should apt configuration be done?
if util.system_is_snappy():
return False, "system is snappy."
- if not (util.which('apt-get') or util.which('apt')):
+ if not (subp.which('apt-get') or subp.which('apt')):
return False, "no apt commands."
return True, "Apt is available."
@@ -478,7 +479,7 @@ def apply_apt(cfg, cloud, target):
def debconf_set_selections(selections, target=None):
if not selections.endswith(b'\n'):
selections += b'\n'
- util.subp(['debconf-set-selections'], data=selections, target=target,
+ subp.subp(['debconf-set-selections'], data=selections, target=target,
capture=True)
@@ -503,7 +504,7 @@ def dpkg_reconfigure(packages, target=None):
"but cannot be unconfigured: %s", unhandled)
if len(to_config):
- util.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
+ subp.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
list(to_config), data=None, target=target, capture=True)
@@ -546,7 +547,7 @@ def apply_debconf_selections(cfg, target=None):
def clean_cloud_init(target):
"""clean out any local cloud-init config"""
flist = glob.glob(
- util.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))
+ subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))
LOG.debug("cleaning cloud-init config from: %s", flist)
for dpkg_cfg in flist:
@@ -575,7 +576,7 @@ def rename_apt_lists(new_mirrors, target, arch):
"""rename_apt_lists - rename apt lists to preserve old cache data"""
default_mirrors = get_default_mirrors(arch)
- pre = util.target_path(target, APT_LISTS)
+ pre = subp.target_path(target, APT_LISTS)
for (name, omirror) in default_mirrors.items():
nmirror = new_mirrors.get(name)
if not nmirror:
@@ -694,8 +695,8 @@ def add_apt_key_raw(key, target=None):
"""
LOG.debug("Adding key:\n'%s'", key)
try:
- util.subp(['apt-key', 'add', '-'], data=key.encode(), target=target)
- except util.ProcessExecutionError:
+ subp.subp(['apt-key', 'add', '-'], data=key.encode(), target=target)
+ except subp.ProcessExecutionError:
LOG.exception("failed to add apt GPG Key to apt keyring")
raise
@@ -758,13 +759,13 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None,
if aa_repo_match(source):
try:
- util.subp(["add-apt-repository", source], target=target)
- except util.ProcessExecutionError:
+ subp.subp(["add-apt-repository", source], target=target)
+ except subp.ProcessExecutionError:
LOG.exception("add-apt-repository failed.")
raise
continue
- sourcefn = util.target_path(target, ent['filename'])
+ sourcefn = subp.target_path(target, ent['filename'])
try:
contents = "%s\n" % (source)
util.write_file(sourcefn, contents, omode="a")
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 6813f534..246e4497 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -16,6 +16,7 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit.settings import PER_ALWAYS
from cloudinit import temp_utils
+from cloudinit import subp
from cloudinit import util
frequency = PER_ALWAYS
@@ -99,7 +100,7 @@ def handle(name, cfg, cloud, log, _args):
if iid:
env['INSTANCE_ID'] = str(iid)
cmd = ['/bin/sh', tmpf.name]
- util.subp(cmd, env=env, capture=False)
+ subp.subp(cmd, env=env, capture=False)
except Exception:
util.logexc(log, "Failed to run bootcmd module %s", name)
raise
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index 0b4352c8..9fdaeba1 100755
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -39,6 +39,7 @@ Valid configuration options for this module are:
"""
from cloudinit.distros import ug_util
+from cloudinit import subp
from cloudinit import util
distros = ['ubuntu', 'debian']
@@ -93,6 +94,6 @@ def handle(name, cfg, cloud, log, args):
if len(shcmd):
cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
log.debug("Setting byobu to %s", value)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index 64bc900e..7617a8ea 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -36,6 +36,7 @@ can be removed from the system with the configuration option
import os
+from cloudinit import subp
from cloudinit import util
CA_CERT_PATH = "/usr/share/ca-certificates/"
@@ -51,7 +52,7 @@ def update_ca_certs():
"""
Updates the CA certificate cache on the current machine.
"""
- util.subp(["update-ca-certificates"], capture=False)
+ subp.subp(["update-ca-certificates"], capture=False)
def add_ca_certs(certs):
@@ -85,7 +86,7 @@ def remove_default_ca_certs():
util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
util.write_file(CA_CERT_CONFIG, "", mode=0o644)
debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"
- util.subp(('debconf-set-selections', '-'), debconf_sel)
+ subp.subp(('debconf-set-selections', '-'), debconf_sel)
def handle(name, cfg, _cloud, log, _args):
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 03285ef0..aaf71366 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -6,79 +6,22 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Chef
-----
-**Summary:** module that configures, starts and installs chef.
-
-This module enables chef to be installed (from packages or
-from gems, or from omnibus). Before this occurs chef configurations are
-written to disk (validation.pem, client.pem, firstboot.json, client.rb),
-and needed chef folders/directories are created (/etc/chef and /var/log/chef
-and so-on). Then once installing proceeds correctly if configured chef will
-be started (in daemon mode or in non-daemon mode) and then once that has
-finished (if ran in non-daemon mode this will be when chef finishes
-converging, if ran in daemon mode then no further actions are possible since
-chef will have forked into its own process) then a post run function can
-run that can do finishing activities (such as removing the validation pem
-file).
-
-**Internal name:** ``cc_chef``
-
-**Module frequency:** per always
-
-**Supported distros:** all
-
-**Config keys**::
-
- chef:
- directories: (defaulting to /etc/chef, /var/log/chef, /var/lib/chef,
- /var/cache/chef, /var/backups/chef, /var/run/chef)
- validation_cert: (optional string to be written to file validation_key)
- special value 'system' means set use existing file
- validation_key: (optional the path for validation_cert. default
- /etc/chef/validation.pem)
- firstboot_path: (path to write run_list and initial_attributes keys that
- should also be present in this configuration, defaults
- to /etc/chef/firstboot.json)
- exec: boolean to run or not run chef (defaults to false, unless
- a gem installed is requested
- where this will then default
- to true)
-
- chef.rb template keys (if falsey, then will be skipped and not
- written to /etc/chef/client.rb)
-
- chef:
- chef_license:
- client_key:
- encrypted_data_bag_secret:
- environment:
- file_backup_path:
- file_cache_path:
- json_attribs:
- log_level:
- log_location:
- node_name:
- omnibus_url:
- omnibus_url_retries:
- omnibus_version:
- pid_file:
- server_url:
- show_time:
- ssl_verify_mode:
- validation_cert:
- validation_key:
- validation_name:
-"""
+"""Chef: module that configures, starts and installs chef."""
import itertools
import json
import os
+from textwrap import dedent
+from cloudinit import subp
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
from cloudinit import templater
+from cloudinit import temp_utils
from cloudinit import url_helper
from cloudinit import util
+from cloudinit.settings import PER_ALWAYS
+
RUBY_VERSION_DEFAULT = "1.8"
@@ -99,6 +42,8 @@ OMNIBUS_URL = "https://www.chef.io/chef/install.sh"
OMNIBUS_URL_RETRIES = 5
CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem'
+CHEF_ENCRYPTED_DATA_BAG_PATH = '/etc/chef/encrypted_data_bag_secret'
+CHEF_ENVIRONMENT = '_default'
CHEF_FB_PATH = '/etc/chef/firstboot.json'
CHEF_RB_TPL_DEFAULTS = {
# These are ruby symbols...
@@ -108,11 +53,11 @@ CHEF_RB_TPL_DEFAULTS = {
'log_location': '/var/log/chef/client.log',
'validation_key': CHEF_VALIDATION_PEM_PATH,
'validation_cert': None,
- 'client_key': "/etc/chef/client.pem",
+ 'client_key': '/etc/chef/client.pem',
'json_attribs': CHEF_FB_PATH,
- 'file_cache_path': "/var/cache/chef",
- 'file_backup_path': "/var/backups/chef",
- 'pid_file': "/var/run/chef/client.pid",
+ 'file_cache_path': '/var/cache/chef',
+ 'file_backup_path': '/var/backups/chef',
+ 'pid_file': '/var/run/chef/client.pid',
'show_time': True,
'encrypted_data_bag_secret': None,
}
@@ -123,7 +68,6 @@ CHEF_RB_TPL_PATH_KEYS = frozenset([
'client_key',
'file_cache_path',
'json_attribs',
- 'file_cache_path',
'pid_file',
'encrypted_data_bag_secret',
'chef_license',
@@ -143,12 +87,277 @@ CHEF_EXEC_PATH = '/usr/bin/chef-client'
CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20'])
-def is_installed():
- if not os.path.isfile(CHEF_EXEC_PATH):
- return False
- if not os.access(CHEF_EXEC_PATH, os.X_OK):
- return False
- return True
+frequency = PER_ALWAYS
+distros = ["all"]
+schema = {
+ 'id': 'cc_chef',
+ 'name': 'Chef',
+ 'title': 'module that configures, starts and installs chef',
+ 'description': dedent("""\
+ This module enables chef to be installed (from packages,
+ gems, or from omnibus). Before this occurs, chef configuration is
+ written to disk (validation.pem, client.pem, firstboot.json,
+ client.rb), and required directories are created (/etc/chef and
+ /var/log/chef and so-on). If configured, chef will be
+ installed and started in either daemon or non-daemon mode.
+ If run in non-daemon mode, post run actions are executed to do
+ finishing activities such as removing validation.pem."""),
+ 'distros': distros,
+ 'examples': [dedent("""
+ chef:
+ directories:
+ - /etc/chef
+ - /var/log/chef
+ validation_cert: system
+ install_type: omnibus
+ initial_attributes:
+ apache:
+ prefork:
+ maxclients: 100
+ keepalive: off
+ run_list:
+ - recipe[apache2]
+ - role[db]
+ encrypted_data_bag_secret: /etc/chef/encrypted_data_bag_secret
+ environment: _default
+ log_level: :auto
+ omnibus_url_retries: 2
+ server_url: https://chef.yourorg.com:4000
+ ssl_verify_mode: :verify_peer
+ validation_name: yourorg-validator""")],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'chef': {
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'properties': {
+ 'directories': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string'
+ },
+ 'uniqueItems': True,
+ 'description': dedent("""\
+ Create the necessary directories for chef to run. By
+ default, it creates the following directories:
+
+ {chef_dirs}""").format(
+ chef_dirs="\n".join(
+ [" - ``{}``".format(d) for d in CHEF_DIRS]
+ )
+ )
+ },
+ 'validation_cert': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Optional string to be written to file validation_key.
+                        Special value ``system`` means use the existing file.
+ """)
+ },
+ 'validation_key': {
+ 'type': 'string',
+ 'default': CHEF_VALIDATION_PEM_PATH,
+ 'description': dedent("""\
+                        Optional path for validation_cert. Defaults to
+                        ``{}``.""".format(CHEF_VALIDATION_PEM_PATH))
+ },
+ 'firstboot_path': {
+ 'type': 'string',
+ 'default': CHEF_FB_PATH,
+ 'description': dedent("""\
+ Path to write run_list and initial_attributes keys that
+ should also be present in this configuration, defaults
+ to ``{}``.""".format(CHEF_FB_PATH))
+ },
+ 'exec': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+                        Define whether chef should run or not (defaults to
+                        false, unless a gem install is requested, in which
+                        case this defaults to true).""")
+ },
+ 'client_key': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['client_key'],
+ 'description': dedent("""\
+                        Optional path for client_cert. Defaults to
+                        ``{}``.""".format(CHEF_RB_TPL_DEFAULTS['client_key']))
+ },
+ 'encrypted_data_bag_secret': {
+ 'type': 'string',
+ 'default': None,
+ 'description': dedent("""\
+ Specifies the location of the secret key used by chef
+ to encrypt data items. By default, this path is set
+ to None, meaning that chef will have to look at the
+ path ``{}`` for it.
+ """.format(CHEF_ENCRYPTED_DATA_BAG_PATH))
+ },
+ 'environment': {
+ 'type': 'string',
+ 'default': CHEF_ENVIRONMENT,
+ 'description': dedent("""\
+ Specifies which environment chef will use. By default,
+ it will use the ``{}`` configuration.
+ """.format(CHEF_ENVIRONMENT))
+ },
+ 'file_backup_path': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['file_backup_path'],
+ 'description': dedent("""\
+ Specifies the location in which backup files are
+ stored. By default, it uses the
+ ``{}`` location.""".format(
+ CHEF_RB_TPL_DEFAULTS['file_backup_path']))
+ },
+ 'file_cache_path': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['file_cache_path'],
+ 'description': dedent("""\
+ Specifies the location in which chef cache files will
+ be saved. By default, it uses the ``{}``
+ location.""".format(
+ CHEF_RB_TPL_DEFAULTS['file_cache_path']))
+ },
+ 'json_attribs': {
+ 'type': 'string',
+ 'default': CHEF_FB_PATH,
+ 'description': dedent("""\
+ Specifies the location in which some chef json data is
+ stored. By default, it uses the
+ ``{}`` location.""".format(CHEF_FB_PATH))
+ },
+ 'log_level': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['log_level'],
+ 'description': dedent("""\
+ Defines the level of logging to be stored in the log
+ file. By default this value is set to ``{}``.
+ """.format(CHEF_RB_TPL_DEFAULTS['log_level']))
+ },
+ 'log_location': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['log_location'],
+ 'description': dedent("""\
+                    Specifies the location of the chef log file. By
+ default, the location is specified at
+ ``{}``.""".format(
+ CHEF_RB_TPL_DEFAULTS['log_location']))
+ },
+ 'node_name': {
+ 'type': 'string',
+ 'description': dedent("""\
+                    The name of the node to run. By default, we will
+                    use the instance id as the node name.""")
+ },
+ 'omnibus_url': {
+ 'type': 'string',
+ 'default': OMNIBUS_URL,
+ 'description': dedent("""\
+ Omnibus URL if chef should be installed through
+ Omnibus. By default, it uses the
+ ``{}``.""".format(OMNIBUS_URL))
+ },
+ 'omnibus_url_retries': {
+ 'type': 'integer',
+ 'default': OMNIBUS_URL_RETRIES,
+ 'description': dedent("""\
+ The number of retries that will be attempted to reach
+ the Omnibus URL""")
+ },
+ 'omnibus_version': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Optional version string to require for omnibus
+ install.""")
+ },
+ 'pid_file': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['pid_file'],
+ 'description': dedent("""\
+ The location in which a process identification
+ number (pid) is saved. By default, it saves
+ in the ``{}`` location.""".format(
+ CHEF_RB_TPL_DEFAULTS['pid_file']))
+ },
+ 'server_url': {
+ 'type': 'string',
+ 'description': 'The URL for the chef server'
+ },
+ 'show_time': {
+ 'type': 'boolean',
+ 'default': True,
+ 'description': 'Show time in chef logs'
+ },
+ 'ssl_verify_mode': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'],
+ 'description': dedent("""\
+ Set the verify mode for HTTPS requests. We can have
+ two possible values for this parameter:
+
+ - ``:verify_none``: No validation of SSL \
+ certificates.
+ - ``:verify_peer``: Validate all SSL certificates.
+
+ By default, the parameter is set as ``{}``.
+ """.format(CHEF_RB_TPL_DEFAULTS['ssl_verify_mode']))
+ },
+ 'validation_name': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The name of the chef-validator key that Chef Infra
+ Client uses to access the Chef Infra Server during
+ the initial Chef Infra Client run.""")
+ },
+ 'force_install': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ If set to ``True``, forces chef installation, even
+ if it is already installed.""")
+ },
+ 'initial_attributes': {
+ 'type': 'object',
+ 'items': {
+ 'type': 'string'
+ },
+ 'description': dedent("""\
+ Specify a list of initial attributes used by the
+ cookbooks.""")
+ },
+ 'install_type': {
+ 'type': 'string',
+ 'default': 'packages',
+ 'description': dedent("""\
+ The type of installation for chef. It can be one of
+ the following values:
+
+ - ``packages``
+ - ``gems``
+ - ``omnibus``""")
+ },
+ 'run_list': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string'
+ },
+ 'description': 'A run list for a first boot json.'
+ },
+ "chef_license": {
+ 'type': 'string',
+ 'description': dedent("""\
+                    string that indicates whether the user accepts or not
+                    the license related to some of chef's products""")
+ }
+ }
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema)
def post_run_chef(chef_cfg, log):
@@ -198,6 +407,8 @@ def handle(name, cfg, cloud, log, _args):
log.debug(("Skipping module named %s,"
" no 'chef' key in configuration"), name)
return
+
+ validate_cloudconfig_schema(cfg, schema)
chef_cfg = cfg['chef']
# Ensure the chef directories we use exist
@@ -225,7 +436,7 @@ def handle(name, cfg, cloud, log, _args):
iid = str(cloud.datasource.get_instance_id())
params = get_template_params(iid, chef_cfg, log)
# Do a best effort attempt to ensure that the template values that
- # are associated with paths have there parent directory created
+ # are associated with paths have their parent directory created
# before they are used by the chef-client itself.
param_paths = set()
for (k, v) in params.items():
@@ -255,9 +466,10 @@ def handle(name, cfg, cloud, log, _args):
# Try to install chef, if its not already installed...
force_install = util.get_cfg_option_bool(chef_cfg,
'force_install', default=False)
- if not is_installed() or force_install:
+ installed = subp.is_exe(CHEF_EXEC_PATH)
+ if not installed or force_install:
run = install_chef(cloud, chef_cfg, log)
- elif is_installed():
+ elif installed:
run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
else:
run = False
@@ -282,7 +494,32 @@ def run_chef(chef_cfg, log):
cmd.extend(CHEF_EXEC_DEF_ARGS)
else:
cmd.extend(CHEF_EXEC_DEF_ARGS)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
+
+
+def subp_blob_in_tempfile(blob, *args, **kwargs):
+ """Write blob to a tempfile, and call subp with args, kwargs. Then cleanup.
+
+ 'basename' as a kwarg allows providing the basename for the file.
+ The 'args' argument to subp will be updated with the full path to the
+ filename as the first argument.
+ """
+ basename = kwargs.pop('basename', "subp_blob")
+
+ if len(args) == 0 and 'args' not in kwargs:
+ args = [tuple()]
+
+ # Use tmpdir over tmpfile to avoid 'text file busy' on execute
+ with temp_utils.tempdir(needs_exe=True) as tmpd:
+ tmpf = os.path.join(tmpd, basename)
+ if 'args' in kwargs:
+ kwargs['args'] = [tmpf] + list(kwargs['args'])
+ else:
+ args = list(args)
+ args[0] = [tmpf] + args[0]
+
+ util.write_file(tmpf, blob, mode=0o700)
+ return subp.subp(*args, **kwargs)
def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None):
@@ -305,7 +542,7 @@ def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None):
else:
args = ['-v', omnibus_version]
content = url_helper.readurl(url=url, retries=retries).contents
- return util.subp_blob_in_tempfile(
+ return subp_blob_in_tempfile(
blob=content, args=args,
basename='chef-omnibus-install', capture=False)
@@ -354,11 +591,11 @@ def install_chef_from_gems(ruby_version, chef_version, distro):
if not os.path.exists('/usr/bin/ruby'):
util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
if chef_version:
- util.subp(['/usr/bin/gem', 'install', 'chef',
+ subp.subp(['/usr/bin/gem', 'install', 'chef',
'-v %s' % chef_version, '--no-ri',
'--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False)
else:
- util.subp(['/usr/bin/gem', 'install', 'chef',
+ subp.subp(['/usr/bin/gem', 'install', 'chef',
'--no-ri', '--no-rdoc', '--bindir',
'/usr/bin', '-q'], capture=False)
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index 885b3138..dff93245 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -26,6 +26,7 @@ by default.
disable_ec2_metadata: <true/false>
"""
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import PER_ALWAYS
@@ -40,15 +41,15 @@ def handle(name, cfg, _cloud, log, _args):
disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
if disabled:
reject_cmd = None
- if util.which('ip'):
+ if subp.which('ip'):
reject_cmd = REJECT_CMD_IP
- elif util.which('ifconfig'):
+ elif subp.which('ifconfig'):
reject_cmd = REJECT_CMD_IF
else:
log.error(('Neither "route" nor "ip" command found, unable to '
'manipulate routing table'))
return
- util.subp(reject_cmd, capture=False)
+ subp.subp(reject_cmd, capture=False)
else:
log.debug(("Skipping module named %s,"
" disabling the ec2 route not enabled"), name)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 45925755..d957cfe3 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -99,6 +99,7 @@ specified using ``filesystem``.
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
+from cloudinit import subp
import logging
import os
import shlex
@@ -106,13 +107,13 @@ import shlex
frequency = PER_INSTANCE
# Define the commands to use
-UDEVADM_CMD = util.which('udevadm')
-SFDISK_CMD = util.which("sfdisk")
-SGDISK_CMD = util.which("sgdisk")
-LSBLK_CMD = util.which("lsblk")
-BLKID_CMD = util.which("blkid")
-BLKDEV_CMD = util.which("blockdev")
-WIPEFS_CMD = util.which("wipefs")
+UDEVADM_CMD = subp.which('udevadm')
+SFDISK_CMD = subp.which("sfdisk")
+SGDISK_CMD = subp.which("sgdisk")
+LSBLK_CMD = subp.which("lsblk")
+BLKID_CMD = subp.which("blkid")
+BLKDEV_CMD = subp.which("blockdev")
+WIPEFS_CMD = subp.which("wipefs")
LANG_C_ENV = {'LANG': 'C'}
@@ -248,7 +249,7 @@ def enumerate_disk(device, nodeps=False):
info = None
try:
- info, _err = util.subp(lsblk_cmd)
+ info, _err = subp.subp(lsblk_cmd)
except Exception as e:
raise Exception("Failed during disk check for %s\n%s" % (device, e))
@@ -310,7 +311,7 @@ def check_fs(device):
blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device]
try:
- out, _err = util.subp(blkid_cmd, rcs=[0, 2])
+ out, _err = subp.subp(blkid_cmd, rcs=[0, 2])
except Exception as e:
raise Exception("Failed during disk check for %s\n%s" % (device, e))
@@ -433,8 +434,8 @@ def get_dyn_func(*args):
def get_hdd_size(device):
try:
- size_in_bytes, _ = util.subp([BLKDEV_CMD, '--getsize64', device])
- sector_size, _ = util.subp([BLKDEV_CMD, '--getss', device])
+ size_in_bytes, _ = subp.subp([BLKDEV_CMD, '--getsize64', device])
+ sector_size, _ = subp.subp([BLKDEV_CMD, '--getss', device])
except Exception as e:
raise Exception("Failed to get %s size\n%s" % (device, e))
@@ -452,7 +453,7 @@ def check_partition_mbr_layout(device, layout):
read_parttbl(device)
prt_cmd = [SFDISK_CMD, "-l", device]
try:
- out, _err = util.subp(prt_cmd, data="%s\n" % layout)
+ out, _err = subp.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
raise Exception("Error running partition command on %s\n%s" % (
device, e))
@@ -482,7 +483,7 @@ def check_partition_mbr_layout(device, layout):
def check_partition_gpt_layout(device, layout):
prt_cmd = [SGDISK_CMD, '-p', device]
try:
- out, _err = util.subp(prt_cmd, update_env=LANG_C_ENV)
+ out, _err = subp.subp(prt_cmd, update_env=LANG_C_ENV)
except Exception as e:
raise Exception("Error running partition command on %s\n%s" % (
device, e))
@@ -655,7 +656,7 @@ def purge_disk(device):
wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']]
try:
LOG.info("Purging filesystem on /dev/%s", d['name'])
- util.subp(wipefs_cmd)
+ subp.subp(wipefs_cmd)
except Exception:
raise Exception("Failed FS purge of /dev/%s" % d['name'])
@@ -682,7 +683,7 @@ def read_parttbl(device):
blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
util.udevadm_settle()
try:
- util.subp(blkdev_cmd)
+ subp.subp(blkdev_cmd)
except Exception as e:
util.logexc(LOG, "Failed reading the partition table %s" % e)
@@ -697,7 +698,7 @@ def exec_mkpart_mbr(device, layout):
# Create the partitions
prt_cmd = [SFDISK_CMD, "--Linux", "--unit=S", "--force", device]
try:
- util.subp(prt_cmd, data="%s\n" % layout)
+ subp.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
raise Exception("Failed to partition device %s\n%s" % (device, e))
@@ -706,16 +707,16 @@ def exec_mkpart_mbr(device, layout):
def exec_mkpart_gpt(device, layout):
try:
- util.subp([SGDISK_CMD, '-Z', device])
+ subp.subp([SGDISK_CMD, '-Z', device])
for index, (partition_type, (start, end)) in enumerate(layout):
index += 1
- util.subp([SGDISK_CMD,
+ subp.subp([SGDISK_CMD,
'-n', '{}:{}:{}'.format(index, start, end), device])
if partition_type is not None:
# convert to a 4 char (or more) string right padded with 0
# 82 -> 8200. 'Linux' -> 'Linux'
pinput = str(partition_type).ljust(4, "0")
- util.subp(
+ subp.subp(
[SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device])
except Exception:
LOG.warning("Failed to partition device %s", device)
@@ -967,9 +968,9 @@ def mkfs(fs_cfg):
fs_cmd)
else:
# Find the mkfs command
- mkfs_cmd = util.which("mkfs.%s" % fs_type)
+ mkfs_cmd = subp.which("mkfs.%s" % fs_type)
if not mkfs_cmd:
- mkfs_cmd = util.which("mk%s" % fs_type)
+ mkfs_cmd = subp.which("mk%s" % fs_type)
if not mkfs_cmd:
LOG.warning("Cannot create fstype '%s'. No mkfs.%s command",
@@ -994,7 +995,7 @@ def mkfs(fs_cfg):
LOG.debug("Creating file system %s on %s", label, device)
LOG.debug(" Using cmd: %s", str(fs_cmd))
try:
- util.subp(fs_cmd, shell=shell)
+ subp.subp(fs_cmd, shell=shell)
except Exception as e:
raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e))
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index b342e04d..b1d99f97 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -25,7 +25,7 @@ import os
from cloudinit import log as logging
from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
+from cloudinit import subp
frequency = PER_ALWAYS
@@ -43,9 +43,9 @@ def is_upstart_system():
del myenv['UPSTART_SESSION']
check_cmd = ['initctl', 'version']
try:
- (out, _err) = util.subp(check_cmd, env=myenv)
+ (out, _err) = subp.subp(check_cmd, env=myenv)
return 'upstart' in out
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
LOG.debug("'%s' returned '%s', not using upstart",
' '.join(check_cmd), e.exit_code)
return False
@@ -66,7 +66,7 @@ def handle(name, _cfg, cloud, log, args):
for n in event_names:
cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath]
try:
- util.subp(cmd)
+ subp.subp(cmd)
except Exception as e:
# TODO(harlowja), use log exception from utils??
log.warning("Emission of upstart event %s failed due to: %s", n, e)
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
index 0a135bbe..77984bca 100644
--- a/cloudinit/config/cc_fan.py
+++ b/cloudinit/config/cc_fan.py
@@ -39,6 +39,7 @@ If cloud-init sees a ``fan`` entry in cloud-config it will:
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -62,8 +63,8 @@ def stop_update_start(service, config_file, content, systemd=False):
def run(cmd, msg):
try:
- return util.subp(cmd, capture=True)
- except util.ProcessExecutionError as e:
+ return subp.subp(cmd, capture=True)
+ except subp.ProcessExecutionError as e:
LOG.warning("failed: %s (%s): %s", service, cmd, e)
return False
@@ -94,7 +95,7 @@ def handle(name, cfg, cloud, log, args):
util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w")
distro = cloud.distro
- if not util.which('fanctl'):
+ if not subp.which('fanctl'):
distro.install_packages(['ubuntu-fan'])
stop_update_start(
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index fd141541..3441f7a9 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -78,7 +78,7 @@ def handle(_name, cfg, cloud, log, args):
boot_fin_fn = cloud.paths.boot_finished
try:
contents = "%s - %s - v. %s\n" % (uptime, ts, cver)
- util.write_file(boot_fin_fn, contents)
+ util.write_file(boot_fin_fn, contents, ensure_dir_exists=False)
except Exception:
util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn)
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 1b512a06..c5d93f81 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -70,6 +70,7 @@ import stat
from cloudinit import log as logging
from cloudinit.settings import PER_ALWAYS
+from cloudinit import subp
from cloudinit import util
frequency = PER_ALWAYS
@@ -131,19 +132,19 @@ class ResizeGrowPart(object):
myenv['LANG'] = 'C'
try:
- (out, _err) = util.subp(["growpart", "--help"], env=myenv)
+ (out, _err) = subp.subp(["growpart", "--help"], env=myenv)
if re.search(r"--update\s+", out):
return True
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
return False
def resize(self, diskdev, partnum, partdev):
before = get_size(partdev)
try:
- util.subp(["growpart", '--dry-run', diskdev, partnum])
- except util.ProcessExecutionError as e:
+ subp.subp(["growpart", '--dry-run', diskdev, partnum])
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)",
diskdev, partnum)
@@ -151,8 +152,8 @@ class ResizeGrowPart(object):
return (before, before)
try:
- util.subp(["growpart", diskdev, partnum])
- except util.ProcessExecutionError as e:
+ subp.subp(["growpart", diskdev, partnum])
+ except subp.ProcessExecutionError as e:
util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum)
raise ResizeFailedException(e)
@@ -165,11 +166,11 @@ class ResizeGpart(object):
myenv['LANG'] = 'C'
try:
- (_out, err) = util.subp(["gpart", "help"], env=myenv, rcs=[0, 1])
+ (_out, err) = subp.subp(["gpart", "help"], env=myenv, rcs=[0, 1])
if re.search(r"gpart recover ", err):
return True
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
return False
@@ -182,16 +183,16 @@ class ResizeGpart(object):
be recovered.
"""
try:
- util.subp(["gpart", "recover", diskdev])
- except util.ProcessExecutionError as e:
+ subp.subp(["gpart", "recover", diskdev])
+ except subp.ProcessExecutionError as e:
if e.exit_code != 0:
util.logexc(LOG, "Failed: gpart recover %s", diskdev)
raise ResizeFailedException(e)
before = get_size(partdev)
try:
- util.subp(["gpart", "resize", "-i", partnum, diskdev])
- except util.ProcessExecutionError as e:
+ subp.subp(["gpart", "resize", "-i", partnum, diskdev])
+ except subp.ProcessExecutionError as e:
util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev)
raise ResizeFailedException(e)
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index 7888464e..eb03c664 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -43,8 +43,9 @@ seeded with empty values, and install_devices_empty is set to true.
import os
+from cloudinit import subp
from cloudinit import util
-from cloudinit.util import ProcessExecutionError
+from cloudinit.subp import ProcessExecutionError
distros = ['ubuntu', 'debian']
@@ -59,7 +60,7 @@ def fetch_idevs(log):
try:
# get the root disk where the /boot directory resides.
- disk = util.subp(['grub-probe', '-t', 'disk', '/boot'],
+ disk = subp.subp(['grub-probe', '-t', 'disk', '/boot'],
capture=True)[0].strip()
except ProcessExecutionError as e:
# grub-common may not be installed, especially on containers
@@ -84,7 +85,7 @@ def fetch_idevs(log):
try:
# check if disk exists and use udevadm to fetch symlinks
- devices = util.subp(
+ devices = subp.subp(
['udevadm', 'info', '--root', '--query=symlink', disk],
capture=True
)[0].strip().split()
@@ -135,7 +136,7 @@ def handle(name, cfg, _cloud, log, _args):
(idevs, idevs_empty))
try:
- util.subp(['debconf-set-selections'], dconf_sel)
+ subp.subp(['debconf-set-selections'], dconf_sel)
except Exception:
util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index 3d2ded3d..0f2be52b 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -33,6 +33,7 @@ key can be used. By default ``ssh-dss`` keys are not written to console.
import os
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
frequency = PER_INSTANCE
@@ -64,7 +65,7 @@ def handle(name, cfg, cloud, log, _args):
try:
cmd = [helper_path, ','.join(fp_blacklist), ','.join(key_blacklist)]
- (stdout, _stderr) = util.subp(cmd)
+ (stdout, _stderr) = subp.subp(cmd)
util.multi_log("%s\n" % (stdout.strip()),
stderr=False, console=True)
except Exception:
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index a9c04d86..299c4d01 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -61,6 +61,7 @@ from io import BytesIO
from configobj import ConfigObj
from cloudinit import type_utils
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
@@ -116,7 +117,7 @@ def handle(_name, cfg, cloud, log, _args):
log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE)
util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
- util.subp(["service", "landscape-client", "restart"])
+ subp.subp(["service", "landscape-client", "restart"])
def merge_together(objs):
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 151a9844..7129c9c6 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -48,6 +48,7 @@ lxd-bridge will be configured accordingly.
"""
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
import os
@@ -85,16 +86,16 @@ def handle(name, cfg, cloud, log, args):
# Install the needed packages
packages = []
- if not util.which("lxd"):
+ if not subp.which("lxd"):
packages.append('lxd')
- if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'):
+ if init_cfg.get("storage_backend") == "zfs" and not subp.which('zfs'):
packages.append('zfsutils-linux')
if len(packages):
try:
cloud.distro.install_packages(packages)
- except util.ProcessExecutionError as exc:
+ except subp.ProcessExecutionError as exc:
log.warning("failed to install packages %s: %s", packages, exc)
return
@@ -104,20 +105,20 @@ def handle(name, cfg, cloud, log, args):
'network_address', 'network_port', 'storage_backend',
'storage_create_device', 'storage_create_loop',
'storage_pool', 'trust_password')
- util.subp(['lxd', 'waitready', '--timeout=300'])
+ subp.subp(['lxd', 'waitready', '--timeout=300'])
cmd = ['lxd', 'init', '--auto']
for k in init_keys:
if init_cfg.get(k):
cmd.extend(["--%s=%s" %
(k.replace('_', '-'), str(init_cfg[k]))])
- util.subp(cmd)
+ subp.subp(cmd)
# Set up lxd-bridge if bridge config is given
dconf_comm = "debconf-communicate"
if bridge_cfg:
net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
if os.path.exists("/etc/default/lxd-bridge") \
- and util.which(dconf_comm):
+ and subp.which(dconf_comm):
# Bridge configured through packaging
debconf = bridge_to_debconf(bridge_cfg)
@@ -127,7 +128,7 @@ def handle(name, cfg, cloud, log, args):
log.debug("Setting lxd debconf via " + dconf_comm)
data = "\n".join(["set %s %s" % (k, v)
for k, v in debconf.items()]) + "\n"
- util.subp(['debconf-communicate'], data)
+ subp.subp(['debconf-communicate'], data)
except Exception:
util.logexc(log, "Failed to run '%s' for lxd with" %
dconf_comm)
@@ -137,7 +138,7 @@ def handle(name, cfg, cloud, log, args):
# Run reconfigure
log.debug("Running dpkg-reconfigure for lxd")
- util.subp(['dpkg-reconfigure', 'lxd',
+ subp.subp(['dpkg-reconfigure', 'lxd',
'--frontend=noninteractive'])
else:
# Built-in LXD bridge support
@@ -264,7 +265,7 @@ def _lxc(cmd):
env = {'LC_ALL': 'C',
'HOME': os.environ.get('HOME', '/root'),
'USER': os.environ.get('USER', 'root')}
- util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
+ subp.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
def maybe_cleanup_default(net_name, did_init, create, attach,
@@ -286,7 +287,7 @@ def maybe_cleanup_default(net_name, did_init, create, attach,
try:
_lxc(["network", "delete", net_name])
LOG.debug(msg, net_name, succeeded)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
raise e
LOG.debug(msg, net_name, fail_assume_enoent)
@@ -296,7 +297,7 @@ def maybe_cleanup_default(net_name, did_init, create, attach,
try:
_lxc(["profile", "device", "remove", profile, nic_name])
LOG.debug(msg, nic_name, profile, succeeded)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
raise e
LOG.debug(msg, nic_name, profile, fail_assume_enoent)
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index 351183f1..41ea4fc9 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -56,6 +56,7 @@ import io
from configobj import ConfigObj
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
@@ -140,6 +141,6 @@ def handle(name, cfg, cloud, log, _args):
configure(config=mcollective_cfg['conf'])
# restart mcollective to handle updated config
- util.subp(['service', 'mcollective', 'restart'], capture=False)
+ subp.subp(['service', 'mcollective', 'restart'], capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 85a89cd1..773b8285 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -69,6 +69,7 @@ import os.path
import re
from cloudinit import type_utils
+from cloudinit import subp
from cloudinit import util
# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
@@ -252,8 +253,8 @@ def create_swapfile(fname: str, size: str) -> None:
'count=%s' % size]
try:
- util.subp(cmd, capture=True)
- except util.ProcessExecutionError as e:
+ subp.subp(cmd, capture=True)
+ except subp.ProcessExecutionError as e:
LOG.warning(errmsg, fname, size, method, e)
util.del_file(fname)
@@ -267,15 +268,15 @@ def create_swapfile(fname: str, size: str) -> None:
else:
try:
create_swap(fname, size, "fallocate")
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
LOG.warning(errmsg, fname, size, "dd", e)
LOG.warning("Will attempt with dd.")
create_swap(fname, size, "dd")
util.chmod(fname, 0o600)
try:
- util.subp(['mkswap', fname])
- except util.ProcessExecutionError:
+ subp.subp(['mkswap', fname])
+ except subp.ProcessExecutionError:
util.del_file(fname)
raise
@@ -378,17 +379,18 @@ def handle(_name, cfg, cloud, log, _args):
fstab_devs = {}
fstab_removed = []
- for line in util.load_file(FSTAB_PATH).splitlines():
- if MNT_COMMENT in line:
- fstab_removed.append(line)
- continue
+ if os.path.exists(FSTAB_PATH):
+ for line in util.load_file(FSTAB_PATH).splitlines():
+ if MNT_COMMENT in line:
+ fstab_removed.append(line)
+ continue
- try:
- toks = WS.split(line)
- except Exception:
- pass
- fstab_devs[toks[0]] = line
- fstab_lines.append(line)
+ try:
+ toks = WS.split(line)
+ except Exception:
+ pass
+ fstab_devs[toks[0]] = line
+ fstab_lines.append(line)
for i in range(len(cfgmnt)):
# skip something that wasn't a list
@@ -538,9 +540,9 @@ def handle(_name, cfg, cloud, log, _args):
for cmd in activate_cmds:
fmt = "Activate mounts: %s:" + ' '.join(cmd)
try:
- util.subp(cmd)
+ subp.subp(cmd)
log.debug(fmt, "PASS")
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
log.warning(fmt, "FAIL")
util.logexc(log, fmt, "FAIL")
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 3b2c2020..7d3f73ff 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -14,6 +14,7 @@ from cloudinit import log as logging
from cloudinit import temp_utils
from cloudinit import templater
from cloudinit import type_utils
+from cloudinit import subp
from cloudinit import util
from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema
from cloudinit.settings import PER_INSTANCE
@@ -307,7 +308,7 @@ def select_ntp_client(ntp_client, distro):
if distro_ntp_client == "auto":
for client in distro.preferred_ntp_clients:
cfg = distro_cfg.get(client)
- if util.which(cfg.get('check_exe')):
+ if subp.which(cfg.get('check_exe')):
LOG.debug('Selected NTP client "%s", already installed',
client)
clientcfg = cfg
@@ -336,7 +337,7 @@ def install_ntp_client(install_func, packages=None, check_exe="ntpd"):
@param check_exe: string. The name of a binary that indicates the package
the specified package is already installed.
"""
- if util.which(check_exe):
+ if subp.which(check_exe):
return
if packages is None:
packages = ['ntp']
@@ -431,7 +432,7 @@ def reload_ntp(service, systemd=False):
cmd = ['systemctl', 'reload-or-restart', service]
else:
cmd = ['service', service, 'restart']
- util.subp(cmd, capture=True)
+ subp.subp(cmd, capture=True)
def supplemental_schema_validation(ntp_config):
@@ -543,7 +544,7 @@ def handle(name, cfg, cloud, log, _args):
try:
reload_ntp(ntp_client_config['service_name'],
systemd=cloud.distro.uses_systemd())
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
LOG.exception("Failed to reload/start ntp service: %s", e)
raise
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
index 86afffef..036baf85 100644
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -43,6 +43,7 @@ import os
import time
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
REBOOT_FILE = "/var/run/reboot-required"
@@ -57,7 +58,7 @@ def _multi_cfg_bool_get(cfg, *keys):
def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
- util.subp(REBOOT_CMD)
+ subp.subp(REBOOT_CMD)
start = time.time()
wait_time = initial_sleep
for _i in range(0, wait_attempts):
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 3e81a3c7..41ffb46c 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -56,6 +56,7 @@ import subprocess
import time
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
frequency = PER_INSTANCE
@@ -71,7 +72,7 @@ def givecmdline(pid):
# PID COMM ARGS
# 1 init /bin/init --
if util.is_FreeBSD():
- (output, _err) = util.subp(['procstat', '-c', str(pid)])
+ (output, _err) = subp.subp(['procstat', '-c', str(pid)])
line = output.splitlines()[1]
m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
return m.group(2)
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index c01f5b8f..635c73bc 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -83,6 +83,7 @@ import yaml
from io import StringIO
from cloudinit import helpers
+from cloudinit import subp
from cloudinit import util
PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
@@ -105,14 +106,14 @@ class PuppetConstants(object):
def _autostart_puppet(log):
# Set puppet to automatically start
if os.path.exists('/etc/default/puppet'):
- util.subp(['sed', '-i',
+ subp.subp(['sed', '-i',
'-e', 's/^START=.*/START=yes/',
'/etc/default/puppet'], capture=False)
elif os.path.exists('/bin/systemctl'):
- util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
+ subp.subp(['/bin/systemctl', 'enable', 'puppet.service'],
capture=False)
elif os.path.exists('/sbin/chkconfig'):
- util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
+ subp.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
else:
log.warning(("Sorry we do not know how to enable"
" puppet services on this system"))
@@ -203,6 +204,6 @@ def handle(name, cfg, cloud, log, _args):
_autostart_puppet(log)
# Start puppetd
- util.subp(['service', 'puppet', 'start'], capture=False)
+ subp.subp(['service', 'puppet', 'start'], capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 01dfc125..8de4db30 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -19,6 +19,7 @@ from textwrap import dedent
from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit.settings import PER_ALWAYS
+from cloudinit import subp
from cloudinit import util
NOBLOCK = "noblock"
@@ -88,11 +89,11 @@ def _resize_zfs(mount_point, devpth):
def _get_dumpfs_output(mount_point):
- return util.subp(['dumpfs', '-m', mount_point])[0]
+ return subp.subp(['dumpfs', '-m', mount_point])[0]
def _get_gpart_output(part):
- return util.subp(['gpart', 'show', part])[0]
+ return subp.subp(['gpart', 'show', part])[0]
def _can_skip_resize_ufs(mount_point, devpth):
@@ -306,8 +307,8 @@ def handle(name, cfg, _cloud, log, args):
def do_resize(resize_cmd, log):
try:
- util.subp(resize_cmd)
- except util.ProcessExecutionError:
+ subp.subp(resize_cmd)
+ except subp.ProcessExecutionError:
util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd)
raise
# TODO(harlowja): Should we add a fsck check after this to make
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 28c79b83..28d62e9d 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -39,6 +39,7 @@ Subscription`` example config.
"""
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -173,7 +174,7 @@ class SubscriptionManager(object):
try:
_sub_man_cli(cmd)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
return False
return True
@@ -200,7 +201,7 @@ class SubscriptionManager(object):
try:
return_out = _sub_man_cli(cmd, logstring_val=True)[0]
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
"to: {0}".format(e.stderr))
@@ -223,7 +224,7 @@ class SubscriptionManager(object):
# Attempting to register the system only
try:
return_out = _sub_man_cli(cmd, logstring_val=True)[0]
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
"to: {0}".format(e.stderr))
@@ -246,7 +247,7 @@ class SubscriptionManager(object):
try:
return_out = _sub_man_cli(cmd)[0]
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.stdout.rstrip() != '':
for line in e.stdout.split("\n"):
if line != '':
@@ -264,7 +265,7 @@ class SubscriptionManager(object):
cmd = ['attach', '--auto']
try:
return_out = _sub_man_cli(cmd)[0]
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
self.log_warn("Auto-attach failed with: {0}".format(e))
return False
for line in return_out.split("\n"):
@@ -341,7 +342,7 @@ class SubscriptionManager(object):
"system: %s", (", ".join(pool_list))
.replace('--pool=', ''))
return True
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
self.log_warn("Unable to attach pool {0} "
"due to {1}".format(pool, e))
return False
@@ -414,7 +415,7 @@ class SubscriptionManager(object):
try:
_sub_man_cli(cmd)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
self.log_warn("Unable to alter repos due to {0}".format(e))
return False
@@ -432,11 +433,11 @@ class SubscriptionManager(object):
def _sub_man_cli(cmd, logstring_val=False):
'''
- Uses the prefered cloud-init subprocess def of util.subp
+ Uses the prefered cloud-init subprocess def of subp.subp
and runs subscription-manager. Breaking this to a
separate function for later use in mocking and unittests
'''
- return util.subp(['subscription-manager'] + cmd,
+ return subp.subp(['subscription-manager'] + cmd,
logstring=logstring_val)
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 5df0137d..1354885a 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -182,6 +182,7 @@ import os
import re
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
DEF_FILENAME = "20-cloud-config.conf"
@@ -215,7 +216,7 @@ def reload_syslog(command=DEF_RELOAD, systemd=False):
cmd = ['service', service, 'restart']
else:
cmd = command
- util.subp(cmd, capture=True)
+ subp.subp(cmd, capture=True)
def load_config(cfg):
@@ -429,7 +430,7 @@ def handle(name, cfg, cloud, log, _args):
restarted = reload_syslog(
command=mycfg[KEYNAME_RELOAD],
systemd=cloud.distro.uses_systemd()),
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
restarted = False
log.warning("Failed to reload syslog", e)
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index 5dd8de37..b61876aa 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -45,7 +45,7 @@ specify them with ``pkg_name``, ``service_name`` and ``config_dir``.
import os
-from cloudinit import safeyaml, util
+from cloudinit import safeyaml, subp, util
from cloudinit.distros import rhel_util
@@ -130,6 +130,6 @@ def handle(name, cfg, cloud, log, _args):
# restart salt-minion. 'service' will start even if not started. if it
# was started, it needs to be restarted for config change.
- util.subp(['service', const.srv_name, 'restart'], capture=False)
+ subp.subp(['service', const.srv_name, 'restart'], capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
index 588e1b03..1e3f419e 100644
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ b/cloudinit/config/cc_scripts_per_boot.py
@@ -24,7 +24,7 @@ module does not accept any config keys.
import os
-from cloudinit import util
+from cloudinit import subp
from cloudinit.settings import PER_ALWAYS
@@ -38,7 +38,7 @@ def handle(name, _cfg, cloud, log, _args):
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
try:
- util.runparts(runparts_path)
+ subp.runparts(runparts_path)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
index 75549b52..5966fb9a 100644
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ b/cloudinit/config/cc_scripts_per_instance.py
@@ -27,7 +27,7 @@ the system. As a result per-instance scripts will run again.
import os
-from cloudinit import util
+from cloudinit import subp
from cloudinit.settings import PER_INSTANCE
@@ -41,7 +41,7 @@ def handle(name, _cfg, cloud, log, _args):
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
try:
- util.runparts(runparts_path)
+ subp.runparts(runparts_path)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
index 259bdfab..bcca859e 100644
--- a/cloudinit/config/cc_scripts_per_once.py
+++ b/cloudinit/config/cc_scripts_per_once.py
@@ -25,7 +25,7 @@ be run in alphabetical order. This module does not accept any config keys.
import os
-from cloudinit import util
+from cloudinit import subp
from cloudinit.settings import PER_ONCE
@@ -39,7 +39,7 @@ def handle(name, _cfg, cloud, log, _args):
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
try:
- util.runparts(runparts_path)
+ subp.runparts(runparts_path)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
index d940dbd6..215703ef 100644
--- a/cloudinit/config/cc_scripts_user.py
+++ b/cloudinit/config/cc_scripts_user.py
@@ -27,7 +27,7 @@ This module does not accept any config keys.
import os
-from cloudinit import util
+from cloudinit import subp
from cloudinit.settings import PER_INSTANCE
@@ -42,7 +42,7 @@ def handle(name, _cfg, cloud, log, _args):
# go here...
runparts_path = os.path.join(cloud.get_ipath_cur(), SCRIPT_SUBDIR)
try:
- util.runparts(runparts_path)
+ subp.runparts(runparts_path)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
index faac9242..e0a4bfff 100644
--- a/cloudinit/config/cc_scripts_vendor.py
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -28,6 +28,7 @@ entry under the ``vendor_data`` config key.
import os
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
@@ -46,7 +47,7 @@ def handle(name, cfg, cloud, log, _args):
prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])
try:
- util.runparts(runparts_path, exe_prefix=prefix)
+ subp.runparts(runparts_path, exe_prefix=prefix)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index b65f3ed9..4fb9b44e 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -65,6 +65,7 @@ from io import BytesIO
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
frequency = PER_INSTANCE
@@ -92,14 +93,14 @@ def handle_random_seed_command(command, required, env=None):
return
cmd = command[0]
- if not util.which(cmd):
+ if not subp.which(cmd):
if required:
raise ValueError(
"command '{cmd}' not found but required=true".format(cmd=cmd))
else:
LOG.debug("command '%s' not found for seed_command", cmd)
return
- util.subp(command, env=env, capture=False)
+ subp.subp(command, env=env, capture=False)
def handle(name, cfg, cloud, log, _args):
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 7b7aa885..d6b5682d 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -83,6 +83,7 @@ import sys
from cloudinit.distros import ug_util
from cloudinit import log as logging
from cloudinit.ssh_util import update_ssh_config
+from cloudinit import subp
from cloudinit import util
from string import ascii_letters, digits
@@ -128,7 +129,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
cmd = list(service_cmd) + ["restart", service_name]
else:
cmd = list(service_cmd) + [service_name, "restart"]
- util.subp(cmd)
+ subp.subp(cmd)
LOG.debug("Restarted the SSH daemon.")
@@ -247,6 +248,6 @@ def chpasswd(distro, plist_in, hashed=False):
distro.set_passwd(u, p, hashed=hashed)
else:
cmd = ['chpasswd'] + (['-e'] if hashed else [])
- util.subp(cmd, plist_in)
+ subp.subp(cmd, plist_in)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 8178562e..20ed7d2f 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -12,6 +12,7 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit.settings import PER_INSTANCE
from cloudinit.subp import prepend_base_command
+from cloudinit import subp
from cloudinit import util
@@ -175,7 +176,7 @@ def add_assertions(assertions):
LOG.debug('Snap acking: %s', asrt.split('\n')[0:2])
util.write_file(ASSERTIONS_FILE, combined.encode('utf-8'))
- util.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
+ subp.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
def run_commands(commands):
@@ -204,8 +205,8 @@ def run_commands(commands):
for command in fixed_snap_commands:
shell = isinstance(command, str)
try:
- util.subp(command, shell=shell, status_cb=sys.stderr.write)
- except util.ProcessExecutionError as e:
+ subp.subp(command, shell=shell, status_cb=sys.stderr.write)
+ except subp.ProcessExecutionError as e:
cmd_failures.append(str(e))
if cmd_failures:
msg = 'Failures running snap commands:\n{cmd_failures}'.format(
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
index 1020e944..95083607 100644
--- a/cloudinit/config/cc_spacewalk.py
+++ b/cloudinit/config/cc_spacewalk.py
@@ -27,7 +27,7 @@ For more information about spacewalk see: https://fedorahosted.org/spacewalk/
activation_key: <key>
"""
-from cloudinit import util
+from cloudinit import subp
distros = ['redhat', 'fedora']
@@ -41,9 +41,9 @@ def is_registered():
# assume we aren't registered; which is sorta ghetto...
already_registered = False
try:
- util.subp(['rhn-profile-sync', '--verbose'], capture=False)
+ subp.subp(['rhn-profile-sync', '--verbose'], capture=False)
already_registered = True
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
raise
return already_registered
@@ -65,7 +65,7 @@ def do_register(server, profile_name,
cmd.extend(['--sslCACert', str(ca_cert_path)])
if activation_key:
cmd.extend(['--activationkey', str(activation_key)])
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
def handle(name, cfg, cloud, log, _args):
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 163cce99..228e5e0d 100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -116,6 +116,7 @@ import sys
from cloudinit.distros import ug_util
from cloudinit import ssh_util
+from cloudinit import subp
from cloudinit import util
@@ -164,7 +165,7 @@ def handle(_name, cfg, cloud, log, _args):
try:
# TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
log.debug("Generated a key for %s from %s", pair[0], pair[1])
except Exception:
util.logexc(log, "Failed generated a key for %s from %s",
@@ -186,9 +187,9 @@ def handle(_name, cfg, cloud, log, _args):
# TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
try:
- out, err = util.subp(cmd, capture=True, env=lang_c)
+ out, err = subp.subp(cmd, capture=True, env=lang_c)
sys.stdout.write(util.decode_binary(out))
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
err = util.decode_binary(e.stderr).lower()
if (e.exit_code == 1 and
err.lower().startswith("unknown key")):
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 63f87298..856e5a9e 100755
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -31,6 +31,7 @@ either ``lp:`` for launchpad or ``gh:`` for github to the username.
"""
from cloudinit.distros import ug_util
+from cloudinit import subp
from cloudinit import util
import pwd
@@ -101,8 +102,8 @@ def import_ssh_ids(ids, user, log):
log.debug("Importing SSH ids for user %s.", user)
try:
- util.subp(cmd, capture=False)
- except util.ProcessExecutionError as exc:
+ subp.subp(cmd, capture=False)
+ except subp.ProcessExecutionError as exc:
util.logexc(log, "Failed to run command to import %s SSH ids", user)
raise exc
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index 8b6d2a1a..35ded5db 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -8,6 +8,7 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
@@ -109,8 +110,8 @@ def configure_ua(token=None, enable=None):
attach_cmd = ['ua', 'attach', token]
LOG.debug('Attaching to Ubuntu Advantage. %s', ' '.join(attach_cmd))
try:
- util.subp(attach_cmd)
- except util.ProcessExecutionError as e:
+ subp.subp(attach_cmd)
+ except subp.ProcessExecutionError as e:
msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format(
error=str(e))
util.logexc(LOG, msg)
@@ -119,8 +120,8 @@ def configure_ua(token=None, enable=None):
for service in enable:
try:
cmd = ['ua', 'enable', service]
- util.subp(cmd, capture=True)
- except util.ProcessExecutionError as e:
+ subp.subp(cmd, capture=True)
+ except subp.ProcessExecutionError as e:
enable_errors.append((service, e))
if enable_errors:
for service, error in enable_errors:
@@ -135,7 +136,7 @@ def configure_ua(token=None, enable=None):
def maybe_install_ua_tools(cloud):
"""Install ubuntu-advantage-tools if not present."""
- if util.which('ua'):
+ if subp.which('ua'):
return
try:
cloud.distro.update_package_sources()
diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
index 297451d6..2d1d2b32 100644
--- a/cloudinit/config/cc_ubuntu_drivers.py
+++ b/cloudinit/config/cc_ubuntu_drivers.py
@@ -9,6 +9,7 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import temp_utils
from cloudinit import type_utils
from cloudinit import util
@@ -108,7 +109,7 @@ def install_drivers(cfg, pkg_install_func):
LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc)
return
- if not util.which('ubuntu-drivers'):
+ if not subp.which('ubuntu-drivers'):
LOG.debug("'ubuntu-drivers' command not available. "
"Installing ubuntu-drivers-common")
pkg_install_func(['ubuntu-drivers-common'])
@@ -131,7 +132,7 @@ def install_drivers(cfg, pkg_install_func):
debconf_script,
util.encode_text(NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT),
mode=0o755)
- util.subp([debconf_script, debconf_file])
+ subp.subp([debconf_script, debconf_file])
except Exception as e:
util.logexc(
LOG, "Failed to register NVIDIA debconf template: %s", str(e))
@@ -141,8 +142,8 @@ def install_drivers(cfg, pkg_install_func):
util.del_dir(tdir)
try:
- util.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg])
- except util.ProcessExecutionError as exc:
+ subp.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg])
+ except subp.ProcessExecutionError as exc:
if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr:
LOG.warning('the available version of ubuntu-drivers is'
' too old to perform requested driver installation')
diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py
index 823917c7..b00f2083 100644
--- a/cloudinit/config/tests/test_disable_ec2_metadata.py
+++ b/cloudinit/config/tests/test_disable_ec2_metadata.py
@@ -15,8 +15,8 @@ DISABLE_CFG = {'disable_ec2_metadata': 'true'}
class TestEC2MetadataRoute(CiTestCase):
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
def test_disable_ifconfig(self, m_subp, m_which):
"""Set the route if ifconfig command is available"""
m_which.side_effect = lambda x: x if x == 'ifconfig' else None
@@ -25,8 +25,8 @@ class TestEC2MetadataRoute(CiTestCase):
['route', 'add', '-host', '169.254.169.254', 'reject'],
capture=False)
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
def test_disable_ip(self, m_subp, m_which):
"""Set the route if ip command is available"""
m_which.side_effect = lambda x: x if x == 'ip' else None
@@ -35,8 +35,8 @@ class TestEC2MetadataRoute(CiTestCase):
['ip', 'route', 'add', 'prohibit', '169.254.169.254'],
capture=False)
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
def test_disable_no_tool(self, m_subp, m_which):
"""Log error when neither route nor ip commands are available"""
m_which.return_value = None # Find neither ifconfig nor ip
diff --git a/cloudinit/config/tests/test_final_message.py b/cloudinit/config/tests/test_final_message.py
new file mode 100644
index 00000000..46ba99b2
--- /dev/null
+++ b/cloudinit/config/tests/test_final_message.py
@@ -0,0 +1,46 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+from unittest import mock
+
+import pytest
+
+from cloudinit.config.cc_final_message import handle
+
+
+class TestHandle:
+ # TODO: Expand these tests to cover full functionality; currently they only
+ # cover the logic around how the boot-finished file is written (and not its
+ # contents).
+
+ @pytest.mark.parametrize(
+ "instance_dir_exists,file_is_written,expected_log_substring",
+ [
+ (True, True, None),
+ (False, False, "Failed to write boot finished file "),
+ ],
+ )
+ def test_boot_finished_written(
+ self,
+ instance_dir_exists,
+ file_is_written,
+ expected_log_substring,
+ caplog,
+ tmpdir,
+ ):
+ instance_dir = tmpdir.join("var/lib/cloud/instance")
+ if instance_dir_exists:
+ instance_dir.ensure_dir()
+ boot_finished = instance_dir.join("boot-finished")
+
+ m_cloud = mock.Mock(
+ paths=mock.Mock(boot_finished=boot_finished.strpath)
+ )
+
+ handle(None, {}, m_cloud, logging.getLogger(), [])
+
+ # We should not change the status of the instance directory
+ assert instance_dir_exists == instance_dir.exists()
+ assert file_is_written == boot_finished.exists()
+
+ if expected_log_substring:
+ assert expected_log_substring in caplog.text
diff --git a/cloudinit/config/tests/test_grub_dpkg.py b/cloudinit/config/tests/test_grub_dpkg.py
index 01efa330..99c05bb5 100644
--- a/cloudinit/config/tests/test_grub_dpkg.py
+++ b/cloudinit/config/tests/test_grub_dpkg.py
@@ -4,7 +4,7 @@ import pytest
from unittest import mock
from logging import Logger
-from cloudinit.util import ProcessExecutionError
+from cloudinit.subp import ProcessExecutionError
from cloudinit.config.cc_grub_dpkg import fetch_idevs, handle
@@ -79,7 +79,7 @@ class TestFetchIdevs:
)
@mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
@mock.patch("cloudinit.config.cc_grub_dpkg.os.path.exists")
- @mock.patch("cloudinit.config.cc_grub_dpkg.util.subp")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
def test_fetch_idevs(self, m_subp, m_exists, m_logexc, grub_output,
path_exists, expected_log_call, udevadm_output,
expected_idevs):
@@ -158,7 +158,7 @@ class TestHandle:
@mock.patch("cloudinit.config.cc_grub_dpkg.fetch_idevs")
@mock.patch("cloudinit.config.cc_grub_dpkg.util.get_cfg_option_str")
@mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
- @mock.patch("cloudinit.config.cc_grub_dpkg.util.subp")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
def test_handle(self, m_subp, m_logexc, m_get_cfg_str, m_fetch_idevs,
cfg_idevs, cfg_idevs_empty, fetch_idevs_output,
expected_log_output):
diff --git a/cloudinit/config/tests/test_mounts.py b/cloudinit/config/tests/test_mounts.py
index 80b54d0f..764a33e3 100644
--- a/cloudinit/config/tests/test_mounts.py
+++ b/cloudinit/config/tests/test_mounts.py
@@ -13,12 +13,12 @@ class TestCreateSwapfile:
@pytest.mark.parametrize('fstype', ('xfs', 'btrfs', 'ext4', 'other'))
@mock.patch(M_PATH + 'util.get_mount_info')
- @mock.patch(M_PATH + 'util.subp')
+ @mock.patch(M_PATH + 'subp.subp')
def test_happy_path(self, m_subp, m_get_mount_info, fstype, tmpdir):
swap_file = tmpdir.join("swap-file")
fname = str(swap_file)
- # Some of the calls to util.subp should create the swap file; this
+ # Some of the calls to subp.subp should create the swap file; this
# roughly approximates that
m_subp.side_effect = lambda *args, **kwargs: swap_file.write('')
diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py
index 2732bd60..daa1ef51 100644
--- a/cloudinit/config/tests/test_set_passwords.py
+++ b/cloudinit/config/tests/test_set_passwords.py
@@ -14,7 +14,7 @@ class TestHandleSshPwauth(CiTestCase):
with_logs = True
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_unknown_value_logs_warning(self, m_subp):
setpass.handle_ssh_pwauth("floo")
self.assertIn("Unrecognized value: ssh_pwauth=floo",
@@ -22,7 +22,7 @@ class TestHandleSshPwauth(CiTestCase):
m_subp.assert_not_called()
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config):
"""If systemctl in service cmd: systemctl restart name."""
setpass.handle_ssh_pwauth(
@@ -31,7 +31,7 @@ class TestHandleSshPwauth(CiTestCase):
m_subp.call_args)
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_service_as_service_cmd(self, m_subp, m_update_ssh_config):
"""If systemctl in service cmd: systemctl restart name."""
setpass.handle_ssh_pwauth(
@@ -40,7 +40,7 @@ class TestHandleSshPwauth(CiTestCase):
m_subp.call_args)
@mock.patch(MODPATH + "update_ssh_config", return_value=False)
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config):
"""If config is not updated, then no system restart should be done."""
setpass.handle_ssh_pwauth(True)
@@ -48,7 +48,7 @@ class TestHandleSshPwauth(CiTestCase):
self.assertIn("No need to restart SSH", self.logs.getvalue())
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config):
"""If 'unchanged', then no updates to config and no restart."""
setpass.handle_ssh_pwauth(
@@ -56,7 +56,7 @@ class TestHandleSshPwauth(CiTestCase):
m_update_ssh_config.assert_not_called()
m_subp.assert_not_called()
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_valid_change_values(self, m_subp):
"""If value is a valid changen value, then update should be called."""
upname = MODPATH + "update_ssh_config"
@@ -88,7 +88,7 @@ class TestSetPasswordsHandle(CiTestCase):
'ssh_pwauth=None\n',
self.logs.getvalue())
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp):
"""handle parses command password hashes."""
cloud = self.tmp_cloud(distro='ubuntu')
@@ -98,7 +98,7 @@ class TestSetPasswordsHandle(CiTestCase):
'ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q'
'SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1']
cfg = {'chpasswd': {'list': valid_hashed_pwds}}
- with mock.patch(MODPATH + 'util.subp') as m_subp:
+ with mock.patch(MODPATH + 'subp.subp') as m_subp:
setpass.handle(
'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
self.assertIn(
@@ -113,7 +113,7 @@ class TestSetPasswordsHandle(CiTestCase):
m_subp.call_args_list)
@mock.patch(MODPATH + "util.is_BSD")
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_bsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
self, m_subp, m_is_bsd):
"""BSD don't use chpasswd"""
@@ -130,7 +130,7 @@ class TestSetPasswordsHandle(CiTestCase):
m_subp.call_args_list)
@mock.patch(MODPATH + "util.is_BSD")
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_handle_on_chpasswd_list_creates_random_passwords(self, m_subp,
m_is_bsd):
"""handle parses command set random passwords."""
@@ -140,7 +140,7 @@ class TestSetPasswordsHandle(CiTestCase):
'root:R',
'ubuntu:RANDOM']
cfg = {'chpasswd': {'expire': 'false', 'list': valid_random_pwds}}
- with mock.patch(MODPATH + 'util.subp') as m_subp:
+ with mock.patch(MODPATH + 'subp.subp') as m_subp:
setpass.handle(
'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
self.assertIn(
diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py
index 95270fa0..6d4c014a 100644
--- a/cloudinit/config/tests/test_snap.py
+++ b/cloudinit/config/tests/test_snap.py
@@ -92,7 +92,7 @@ class TestAddAssertions(CiTestCase):
super(TestAddAssertions, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_add_assertions_on_empty_list(self, m_subp):
"""When provided with an empty list, add_assertions does nothing."""
add_assertions([])
@@ -107,7 +107,7 @@ class TestAddAssertions(CiTestCase):
"assertion parameter was not a list or dict: I'm Not Valid",
str(context_manager.exception))
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_add_assertions_adds_assertions_as_list(self, m_subp):
"""When provided with a list, add_assertions adds all assertions."""
self.assertEqual(
@@ -130,7 +130,7 @@ class TestAddAssertions(CiTestCase):
self.assertEqual(
util.load_file(compare_file), util.load_file(assert_file))
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_add_assertions_adds_assertions_as_dict(self, m_subp):
"""When provided with a dict, add_assertions adds all assertions."""
self.assertEqual(
@@ -168,7 +168,7 @@ class TestRunCommands(CiTestCase):
super(TestRunCommands, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_run_commands_on_empty_list(self, m_subp):
"""When provided with an empty list, run_commands does nothing."""
run_commands([])
@@ -477,7 +477,7 @@ class TestHandle(CiTestCase):
self.assertEqual('HI\nMOM\n', util.load_file(outfile))
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_handle_adds_assertions(self, m_subp):
"""Any configured snap assertions are provided to add_assertions."""
assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
@@ -493,7 +493,7 @@ class TestHandle(CiTestCase):
self.assertEqual(
util.load_file(compare_file), util.load_file(assert_file))
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
@skipUnlessJsonSchema()
def test_handle_validates_schema(self, m_subp):
"""Any provided configuration is runs validate_cloudconfig_schema."""
diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
index 8c4161ef..db7fb726 100644
--- a/cloudinit/config/tests/test_ubuntu_advantage.py
+++ b/cloudinit/config/tests/test_ubuntu_advantage.py
@@ -3,7 +3,7 @@
from cloudinit.config.cc_ubuntu_advantage import (
configure_ua, handle, maybe_install_ua_tools, schema)
from cloudinit.config.schema import validate_cloudconfig_schema
-from cloudinit import util
+from cloudinit import subp
from cloudinit.tests.helpers import (
CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
@@ -26,10 +26,10 @@ class TestConfigureUA(CiTestCase):
super(TestConfigureUA, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_error(self, m_subp):
"""Errors from ua attach command are raised."""
- m_subp.side_effect = util.ProcessExecutionError(
+ m_subp.side_effect = subp.ProcessExecutionError(
'Invalid token SomeToken')
with self.assertRaises(RuntimeError) as context_manager:
configure_ua(token='SomeToken')
@@ -39,7 +39,7 @@ class TestConfigureUA(CiTestCase):
'Stdout: Invalid token SomeToken\nStderr: -',
str(context_manager.exception))
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_token(self, m_subp):
"""When token is provided, attach the machine to ua using the token."""
configure_ua(token='SomeToken')
@@ -48,7 +48,7 @@ class TestConfigureUA(CiTestCase):
'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
self.logs.getvalue())
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_on_service_error(self, m_subp):
"""all services should be enabled and then any failures raised"""
@@ -56,7 +56,7 @@ class TestConfigureUA(CiTestCase):
fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']]
if cmd in fail_cmds and capture:
svc = cmd[-1]
- raise util.ProcessExecutionError(
+ raise subp.ProcessExecutionError(
'Invalid {} credentials'.format(svc.upper()))
m_subp.side_effect = fake_subp
@@ -83,7 +83,7 @@ class TestConfigureUA(CiTestCase):
'Failure enabling Ubuntu Advantage service(s): "esm", "cc"',
str(context_manager.exception))
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_empty_services(self, m_subp):
"""When services is an empty list, do not auto-enable attach."""
configure_ua(token='SomeToken', enable=[])
@@ -92,7 +92,7 @@ class TestConfigureUA(CiTestCase):
'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
self.logs.getvalue())
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_specific_services(self, m_subp):
"""When services a list, only enable specific services."""
configure_ua(token='SomeToken', enable=['fips'])
@@ -105,7 +105,7 @@ class TestConfigureUA(CiTestCase):
self.logs.getvalue())
@mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_string_services(self, m_subp):
"""When services a string, treat as singleton list and warn"""
configure_ua(token='SomeToken', enable='fips')
@@ -119,7 +119,7 @@ class TestConfigureUA(CiTestCase):
'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
self.logs.getvalue())
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_weird_services(self, m_subp):
"""When services not string or list, warn but still attach"""
configure_ua(token='SomeToken', enable={'deffo': 'wont work'})
@@ -285,7 +285,7 @@ class TestMaybeInstallUATools(CiTestCase):
super(TestMaybeInstallUATools, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('%s.util.which' % MPATH)
+ @mock.patch('%s.subp.which' % MPATH)
def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which):
"""Do nothing if ubuntu-advantage-tools already exists."""
m_which.return_value = '/usr/bin/ua' # already installed
@@ -294,7 +294,7 @@ class TestMaybeInstallUATools(CiTestCase):
'Some apt error')
maybe_install_ua_tools(cloud=FakeCloud(distro)) # No RuntimeError
- @mock.patch('%s.util.which' % MPATH)
+ @mock.patch('%s.subp.which' % MPATH)
def test_maybe_install_ua_tools_raises_update_errors(self, m_which):
"""maybe_install_ua_tools logs and raises apt update errors."""
m_which.return_value = None
@@ -306,7 +306,7 @@ class TestMaybeInstallUATools(CiTestCase):
self.assertEqual('Some apt error', str(context_manager.exception))
self.assertIn('Package update failed\nTraceback', self.logs.getvalue())
- @mock.patch('%s.util.which' % MPATH)
+ @mock.patch('%s.subp.which' % MPATH)
def test_maybe_install_ua_raises_install_errors(self, m_which):
"""maybe_install_ua_tools logs and raises package install errors."""
m_which.return_value = None
@@ -320,7 +320,7 @@ class TestMaybeInstallUATools(CiTestCase):
self.assertIn(
'Failed to install ubuntu-advantage-tools\n', self.logs.getvalue())
- @mock.patch('%s.util.which' % MPATH)
+ @mock.patch('%s.subp.which' % MPATH)
def test_maybe_install_ua_tools_happy_path(self, m_which):
"""maybe_install_ua_tools installs ubuntu-advantage-tools."""
m_which.return_value = None
diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py
index 0aec1265..504ba356 100644
--- a/cloudinit/config/tests/test_ubuntu_drivers.py
+++ b/cloudinit/config/tests/test_ubuntu_drivers.py
@@ -7,7 +7,7 @@ from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock
from cloudinit.config.schema import (
SchemaValidationError, validate_cloudconfig_schema)
from cloudinit.config import cc_ubuntu_drivers as drivers
-from cloudinit.util import ProcessExecutionError
+from cloudinit.subp import ProcessExecutionError
MPATH = "cloudinit.config.cc_ubuntu_drivers."
M_TMP_PATH = MPATH + "temp_utils.mkdtemp"
@@ -53,8 +53,8 @@ class TestUbuntuDrivers(CiTestCase):
schema=drivers.schema, strict=True)
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp", return_value=('', ''))
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.which", return_value=False)
def _assert_happy_path_taken(
self, config, m_which, m_subp, m_tmp):
"""Positive path test through handle. Package should be installed."""
@@ -80,8 +80,8 @@ class TestUbuntuDrivers(CiTestCase):
self._assert_happy_path_taken(new_config)
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp")
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp")
+ @mock.patch(MPATH + "subp.which", return_value=False)
def test_handle_raises_error_if_no_drivers_found(
self, m_which, m_subp, m_tmp):
"""If ubuntu-drivers doesn't install any drivers, raise an error."""
@@ -109,8 +109,8 @@ class TestUbuntuDrivers(CiTestCase):
self.assertIn('ubuntu-drivers found no drivers for installation',
self.logs.getvalue())
- @mock.patch(MPATH + "util.subp", return_value=('', ''))
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.which", return_value=False)
def _assert_inert_with_config(self, config, m_which, m_subp):
"""Helper to reduce repetition when testing negative cases"""
myCloud = mock.MagicMock()
@@ -154,8 +154,8 @@ class TestUbuntuDrivers(CiTestCase):
self.assertEqual(0, m_install_drivers.call_count)
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp", return_value=('', ''))
- @mock.patch(MPATH + "util.which", return_value=True)
+ @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.which", return_value=True)
def test_install_drivers_no_install_if_present(
self, m_which, m_subp, m_tmp):
"""If 'ubuntu-drivers' is present, no package install should occur."""
@@ -181,8 +181,8 @@ class TestUbuntuDrivers(CiTestCase):
self.assertEqual(0, pkg_install.call_count)
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp")
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp")
+ @mock.patch(MPATH + "subp.which", return_value=False)
def test_install_drivers_handles_old_ubuntu_drivers_gracefully(
self, m_which, m_subp, m_tmp):
"""Older ubuntu-drivers versions should emit message and raise error"""
@@ -219,8 +219,8 @@ class TestUbuntuDriversWithVersion(TestUbuntuDrivers):
install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123']
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp", return_value=('', ''))
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.which", return_value=False)
def test_version_none_uses_latest(self, m_which, m_subp, m_tmp):
tdir = self.tmp_dir()
debconf_file = os.path.join(tdir, 'nvidia.template')
diff --git a/cloudinit/conftest.py b/cloudinit/conftest.py
deleted file mode 100644
index 37cbbcda..00000000
--- a/cloudinit/conftest.py
+++ /dev/null
@@ -1,72 +0,0 @@
-from unittest import mock
-
-import pytest
-
-from cloudinit import util
-
-
-@pytest.yield_fixture(autouse=True)
-def disable_subp_usage(request):
- """
- Across all (pytest) tests, ensure that util.subp is not invoked.
-
- Note that this can only catch invocations where the util module is imported
- and ``util.subp(...)`` is called. ``from cloudinit.util import subp``
- imports happen before the patching here (or the CiTestCase monkey-patching)
- happens, so are left untouched.
-
- To allow a particular test method or class to use util.subp you can set the
- parameter passed to this fixture to False using pytest.mark.parametrize::
-
- @pytest.mark.parametrize("disable_subp_usage", [False], indirect=True)
- def test_whoami(self):
- util.subp(["whoami"])
-
- To instead allow util.subp usage for a specific command, you can set the
- parameter passed to this fixture to that command:
-
- @pytest.mark.parametrize("disable_subp_usage", ["bash"], indirect=True)
- def test_bash(self):
- util.subp(["bash"])
-
- To specify multiple commands, set the parameter to a list (note the
- double-layered list: we specify a single parameter that is itself a list):
-
- @pytest.mark.parametrize(
- "disable_subp_usage", ["bash", "whoami"], indirect=True)
- def test_several_things(self):
- util.subp(["bash"])
- util.subp(["whoami"])
-
- This fixture (roughly) mirrors the functionality of
- CiTestCase.allowed_subp. N.B. While autouse fixtures do affect non-pytest
- tests, CiTestCase's allowed_subp does take precedence (and we have
- TestDisableSubpUsageInTestSubclass to confirm that).
- """
- should_disable = getattr(request, "param", True)
- if should_disable:
- if not isinstance(should_disable, (list, str)):
- def side_effect(args, *other_args, **kwargs):
- raise AssertionError("Unexpectedly used util.subp")
- else:
- # Look this up before our patch is in place, so we have access to
- # the real implementation in side_effect
- subp = util.subp
-
- if isinstance(should_disable, str):
- should_disable = [should_disable]
-
- def side_effect(args, *other_args, **kwargs):
- cmd = args[0]
- if cmd not in should_disable:
- raise AssertionError(
- "Unexpectedly used util.subp to call {} (allowed:"
- " {})".format(cmd, ",".join(should_disable))
- )
- return subp(args, *other_args, **kwargs)
-
- with mock.patch('cloudinit.util.subp', autospec=True) as m_subp:
- m_subp.side_effect = side_effect
- yield
- else:
- yield
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 35a10590..2fc91bbc 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -25,9 +25,14 @@ from cloudinit.net import network_state
from cloudinit.net import renderers
from cloudinit import ssh_util
from cloudinit import type_utils
+from cloudinit import subp
from cloudinit import util
+from cloudinit.features import \
+ ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES
+
from cloudinit.distros.parsers import hosts
+from .networking import LinuxNetworking
# Used when a cloud-config module can be run on all cloud-init distibutions.
@@ -66,11 +71,13 @@ class Distro(metaclass=abc.ABCMeta):
init_cmd = ['service'] # systemctl, service etc
renderer_configs = {}
_preferred_ntp_clients = None
+ networking_cls = LinuxNetworking
def __init__(self, name, cfg, paths):
self._paths = paths
self._cfg = cfg
self.name = name
+ self.networking = self.networking_cls()
@abc.abstractmethod
def install_packages(self, pkglist):
@@ -225,8 +232,8 @@ class Distro(metaclass=abc.ABCMeta):
LOG.debug("Non-persistently setting the system hostname to %s",
hostname)
try:
- util.subp(['hostname', hostname])
- except util.ProcessExecutionError:
+ subp.subp(['hostname', hostname])
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Failed to non-persistently adjust the system "
"hostname to %s", hostname)
@@ -361,12 +368,12 @@ class Distro(metaclass=abc.ABCMeta):
LOG.debug("Attempting to run bring up interface %s using command %s",
device_name, cmd)
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
return True
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
@@ -480,7 +487,7 @@ class Distro(metaclass=abc.ABCMeta):
# Run the command
LOG.debug("Adding user %s", name)
try:
- util.subp(useradd_cmd, logstring=log_useradd_cmd)
+ subp.subp(useradd_cmd, logstring=log_useradd_cmd)
except Exception as e:
util.logexc(LOG, "Failed to create user %s", name)
raise e
@@ -500,7 +507,7 @@ class Distro(metaclass=abc.ABCMeta):
# Run the command
LOG.debug("Adding snap user %s", name)
try:
- (out, err) = util.subp(create_user_cmd, logstring=create_user_cmd,
+ (out, err) = subp.subp(create_user_cmd, logstring=create_user_cmd,
capture=True)
LOG.debug("snap create-user returned: %s:%s", out, err)
jobj = util.load_json(out)
@@ -582,20 +589,20 @@ class Distro(metaclass=abc.ABCMeta):
# passwd must use short '-l' due to SLES11 lacking long form '--lock'
lock_tools = (['passwd', '-l', name], ['usermod', '--lock', name])
try:
- cmd = next(tool for tool in lock_tools if util.which(tool[0]))
+ cmd = next(tool for tool in lock_tools if subp.which(tool[0]))
except StopIteration:
raise RuntimeError((
"Unable to lock user account '%s'. No tools available. "
" Tried: %s.") % (name, [c[0] for c in lock_tools]))
try:
- util.subp(cmd)
+ subp.subp(cmd)
except Exception as e:
util.logexc(LOG, 'Failed to disable password for user %s', name)
raise e
def expire_passwd(self, user):
try:
- util.subp(['passwd', '--expire', user])
+ subp.subp(['passwd', '--expire', user])
except Exception as e:
util.logexc(LOG, "Failed to set 'expire' for %s", user)
raise e
@@ -611,7 +618,7 @@ class Distro(metaclass=abc.ABCMeta):
cmd.append('-e')
try:
- util.subp(cmd, pass_string, logstring="chpasswd for %s" % user)
+ subp.subp(cmd, pass_string, logstring="chpasswd for %s" % user)
except Exception as e:
util.logexc(LOG, "Failed to set password for %s", user)
raise e
@@ -708,7 +715,7 @@ class Distro(metaclass=abc.ABCMeta):
LOG.warning("Skipping creation of existing group '%s'", name)
else:
try:
- util.subp(group_add_cmd)
+ subp.subp(group_add_cmd)
LOG.info("Created new group %s", name)
except Exception:
util.logexc(LOG, "Failed to create group %s", name)
@@ -721,7 +728,7 @@ class Distro(metaclass=abc.ABCMeta):
"; user does not exist.", member, name)
continue
- util.subp(['usermod', '-a', '-G', name, member])
+ subp.subp(['usermod', '-a', '-G', name, member])
LOG.info("Added user '%s' to group '%s'", member, name)
@@ -845,7 +852,12 @@ def _get_package_mirror_info(mirror_info, data_source=None,
# ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b)
# the region is us-east-1. so region = az[0:-1]
if _EC2_AZ_RE.match(data_source.availability_zone):
- subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1]
+ ec2_region = data_source.availability_zone[0:-1]
+
+ if ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES:
+ subst['ec2_region'] = "%s" % ec2_region
+ elif data_source.platform_type == "ec2":
+ subst['ec2_region'] = "%s" % ec2_region
if data_source and data_source.region:
subst['region'] = data_source.region
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index 9f89c5f9..038aa9ac 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -8,6 +8,7 @@ from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
+from cloudinit import subp
from cloudinit.distros import net_util
from cloudinit.distros.parsers.hostname import HostnameConf
@@ -44,7 +45,7 @@ class Distro(distros.Distro):
def apply_locale(self, locale, out_fn=None):
if not out_fn:
out_fn = self.locale_conf_fn
- util.subp(['locale-gen', '-G', locale], capture=False)
+ subp.subp(['locale-gen', '-G', locale], capture=False)
# "" provides trailing newline during join
lines = [
util.make_header(),
@@ -76,11 +77,11 @@ class Distro(distros.Distro):
def _enable_interface(self, device_name):
cmd = ['netctl', 'reenable', device_name]
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
def _bring_up_interface(self, device_name):
@@ -88,12 +89,12 @@ class Distro(distros.Distro):
LOG.debug("Attempting to run bring up interface %s using command %s",
device_name, cmd)
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
return True
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
@@ -158,7 +159,7 @@ class Distro(distros.Distro):
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
@@ -173,8 +174,8 @@ def _render_network(entries, target="/", conf_dir="etc/netctl",
devs = []
nameservers = []
- resolv_conf = util.target_path(target, resolv_conf)
- conf_dir = util.target_path(target, conf_dir)
+ resolv_conf = subp.target_path(target, resolv_conf)
+ conf_dir = subp.target_path(target, conf_dir)
for (dev, info) in entries.items():
if dev == 'lo':
diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py
index 37cf93bf..2ed7a7d5 100644
--- a/cloudinit/distros/bsd.py
+++ b/cloudinit/distros/bsd.py
@@ -5,12 +5,15 @@ from cloudinit.distros import bsd_utils
from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import net
+from cloudinit import subp
from cloudinit import util
+from .networking import BSDNetworking
LOG = logging.getLogger(__name__)
class BSD(distros.Distro):
+ networking_cls = BSDNetworking
hostname_conf_fn = '/etc/rc.conf'
rc_conf_fn = "/etc/rc.conf"
@@ -50,7 +53,7 @@ class BSD(distros.Distro):
else:
group_add_cmd = self.group_add_cmd_prefix + [name]
try:
- util.subp(group_add_cmd)
+ subp.subp(group_add_cmd)
LOG.info("Created new group %s", name)
except Exception:
util.logexc(LOG, "Failed to create group %s", name)
@@ -63,7 +66,7 @@ class BSD(distros.Distro):
"; user does not exist.", member, name)
continue
try:
- util.subp(self._get_add_member_to_group_cmd(member, name))
+ subp.subp(self._get_add_member_to_group_cmd(member, name))
LOG.info("Added user '%s' to group '%s'", member, name)
except Exception:
util.logexc(LOG, "Failed to add user '%s' to group '%s'",
@@ -111,7 +114,7 @@ class BSD(distros.Distro):
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, env=self._get_pkg_cmd_environ(), capture=False)
+ subp.subp(cmd, env=self._get_pkg_cmd_environ(), capture=False)
def _write_network_config(self, netconfig):
return self._supported_write_network_config(netconfig)
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 128bb523..844aaf21 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -13,6 +13,7 @@ import os
from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.distros.parsers.hostname import HostnameConf
@@ -197,7 +198,7 @@ class Distro(distros.Distro):
# Allow the output of this to flow outwards (ie not be captured)
util.log_time(logfunc=LOG.debug,
msg="apt-%s [%s]" % (command, ' '.join(cmd)),
- func=util.subp,
+ func=subp.subp,
args=(cmd,), kwargs={'env': e, 'capture': False})
def update_package_sources(self):
@@ -214,7 +215,7 @@ def _get_wrapper_prefix(cmd, mode):
if (util.is_true(mode) or
(str(mode).lower() == "auto" and cmd[0] and
- util.which(cmd[0]))):
+ subp.which(cmd[0]))):
return cmd
else:
return []
@@ -269,7 +270,7 @@ def update_locale_conf(locale, sys_path, keyname='LANG'):
"""Update system locale config"""
LOG.debug('Updating %s with locale setting %s=%s',
sys_path, keyname, locale)
- util.subp(
+ subp.subp(
['update-locale', '--locale-file=' + sys_path,
'%s=%s' % (keyname, locale)], capture=False)
@@ -291,7 +292,7 @@ def regenerate_locale(locale, sys_path, keyname='LANG'):
# finally, trigger regeneration
LOG.debug('Generating locales for %s', locale)
- util.subp(['locale-gen', locale], capture=False)
+ subp.subp(['locale-gen', locale], capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index b3a4ad67..dde34d41 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -10,6 +10,7 @@ from io import StringIO
import cloudinit.distros.bsd
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
@@ -78,7 +79,7 @@ class Distro(cloudinit.distros.bsd.BSD):
# Run the command
LOG.info("Adding user %s", name)
try:
- util.subp(pw_useradd_cmd, logstring=log_pw_useradd_cmd)
+ subp.subp(pw_useradd_cmd, logstring=log_pw_useradd_cmd)
except Exception:
util.logexc(LOG, "Failed to create user %s", name)
raise
@@ -90,7 +91,7 @@ class Distro(cloudinit.distros.bsd.BSD):
def expire_passwd(self, user):
try:
- util.subp(['pw', 'usermod', user, '-p', '01-Jan-1970'])
+ subp.subp(['pw', 'usermod', user, '-p', '01-Jan-1970'])
except Exception:
util.logexc(LOG, "Failed to set pw expiration for %s", user)
raise
@@ -102,7 +103,7 @@ class Distro(cloudinit.distros.bsd.BSD):
hash_opt = "-h"
try:
- util.subp(['pw', 'usermod', user, hash_opt, '0'],
+ subp.subp(['pw', 'usermod', user, hash_opt, '0'],
data=passwd, logstring="chpasswd for %s" % user)
except Exception:
util.logexc(LOG, "Failed to set password for %s", user)
@@ -110,7 +111,7 @@ class Distro(cloudinit.distros.bsd.BSD):
def lock_passwd(self, name):
try:
- util.subp(['pw', 'usermod', name, '-h', '-'])
+ subp.subp(['pw', 'usermod', name, '-h', '-'])
except Exception:
util.logexc(LOG, "Failed to lock user %s", name)
raise
@@ -131,8 +132,8 @@ class Distro(cloudinit.distros.bsd.BSD):
try:
LOG.debug("Running cap_mkdb for %s", locale)
- util.subp(['cap_mkdb', self.login_conf_fn])
- except util.ProcessExecutionError:
+ subp.subp(['cap_mkdb', self.login_conf_fn])
+ except subp.ProcessExecutionError:
# cap_mkdb failed, so restore the backup.
util.logexc(LOG, "Failed to apply locale %s", locale)
try:
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index dc57717d..2bee1c89 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -9,6 +9,7 @@
from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.distros import net_util
@@ -39,7 +40,7 @@ class Distro(distros.Distro):
def apply_locale(self, locale, out_fn=None):
if not out_fn:
out_fn = self.locale_conf_fn
- util.subp(['locale-gen', '-G', locale], capture=False)
+ subp.subp(['locale-gen', '-G', locale], capture=False)
# "" provides trailing newline during join
lines = [
util.make_header(),
@@ -94,11 +95,11 @@ class Distro(distros.Distro):
cmd = ['rc-update', 'add', 'net.{name}'.format(name=dev),
'default']
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed",
cmd)
@@ -119,12 +120,12 @@ class Distro(distros.Distro):
LOG.debug("Attempting to run bring up interface %s using command %s",
device_name, cmd)
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
return True
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
@@ -137,11 +138,11 @@ class Distro(distros.Distro):
# Grab device names from init scripts
cmd = ['ls', '/etc/init.d/net.*']
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
devices = [x.split('.')[2] for x in _out.split(' ')]
@@ -208,7 +209,7 @@ class Distro(distros.Distro):
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
diff --git a/cloudinit/distros/netbsd.py b/cloudinit/distros/netbsd.py
index ecc8239a..f1a9b182 100644
--- a/cloudinit/distros/netbsd.py
+++ b/cloudinit/distros/netbsd.py
@@ -8,6 +8,7 @@ import platform
import cloudinit.distros.bsd
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -76,7 +77,7 @@ class NetBSD(cloudinit.distros.bsd.BSD):
# Run the command
LOG.info("Adding user %s", name)
try:
- util.subp(adduser_cmd, logstring=log_adduser_cmd)
+ subp.subp(adduser_cmd, logstring=log_adduser_cmd)
except Exception:
util.logexc(LOG, "Failed to create user %s", name)
raise
@@ -99,11 +100,12 @@ class NetBSD(cloudinit.distros.bsd.BSD):
else:
method = crypt.METHOD_BLOWFISH # pylint: disable=E1101
hashed_pw = crypt.crypt(
- passwd,
- crypt.mksalt(method))
+ passwd,
+ crypt.mksalt(method)
+ )
try:
- util.subp(['usermod', '-p', hashed_pw, user])
+ subp.subp(['usermod', '-p', hashed_pw, user])
except Exception:
util.logexc(LOG, "Failed to set password for %s", user)
raise
@@ -111,21 +113,21 @@ class NetBSD(cloudinit.distros.bsd.BSD):
def force_passwd_change(self, user):
try:
- util.subp(['usermod', '-F', user])
+ subp.subp(['usermod', '-F', user])
except Exception:
util.logexc(LOG, "Failed to set pw expiration for %s", user)
raise
def lock_passwd(self, name):
try:
- util.subp(['usermod', '-C', 'yes', name])
+ subp.subp(['usermod', '-C', 'yes', name])
except Exception:
util.logexc(LOG, "Failed to lock user %s", name)
raise
def unlock_passwd(self, name):
try:
- util.subp(['usermod', '-C', 'no', name])
+ subp.subp(['usermod', '-C', 'no', name])
except Exception:
util.logexc(LOG, "Failed to unlock user %s", name)
raise
@@ -142,8 +144,9 @@ class NetBSD(cloudinit.distros.bsd.BSD):
os_arch = platform.machine()
e = os.environ.copy()
e['PKG_PATH'] = (
- 'http://cdn.netbsd.org/pub/pkgsrc/'
- 'packages/NetBSD/%s/%s/All') % (os_arch, os_release)
+ 'http://cdn.netbsd.org/pub/pkgsrc/'
+ 'packages/NetBSD/%s/%s/All'
+ ) % (os_arch, os_release)
return e
def update_package_sources(self):
diff --git a/cloudinit/distros/networking.py b/cloudinit/distros/networking.py
new file mode 100644
index 00000000..e421a2ce
--- /dev/null
+++ b/cloudinit/distros/networking.py
@@ -0,0 +1,140 @@
+import abc
+import os
+
+from cloudinit import net
+
+
+# Type aliases (https://docs.python.org/3/library/typing.html#type-aliases),
+# used to make the signatures of methods a little clearer
+DeviceName = str
+NetworkConfig = dict
+
+
+class Networking(metaclass=abc.ABCMeta):
+ """The root of the Networking hierarchy in cloud-init.
+
+    This is part of an ongoing refactor in the cloud-init codebase; for
+    more details, see "``cloudinit.net`` -> ``cloudinit.distros.networking``
+    Hierarchy" in HACKING.rst.
+ """
+
+ def _get_current_rename_info(self) -> dict:
+ return net._get_current_rename_info()
+
+ def _rename_interfaces(self, renames: list, *, current_info=None) -> None:
+ return net._rename_interfaces(renames, current_info=current_info)
+
+ def apply_network_config_names(self, netcfg: NetworkConfig) -> None:
+ return net.apply_network_config_names(netcfg)
+
+ def device_devid(self, devname: DeviceName):
+ return net.device_devid(devname)
+
+ def device_driver(self, devname: DeviceName):
+ return net.device_driver(devname)
+
+ def extract_physdevs(self, netcfg: NetworkConfig) -> list:
+ return net.extract_physdevs(netcfg)
+
+ def find_fallback_nic(self, *, blacklist_drivers=None):
+ return net.find_fallback_nic(blacklist_drivers=blacklist_drivers)
+
+ def generate_fallback_config(
+ self, *, blacklist_drivers=None, config_driver: bool = False
+ ):
+ return net.generate_fallback_config(
+ blacklist_drivers=blacklist_drivers, config_driver=config_driver
+ )
+
+ def get_devicelist(self) -> list:
+ return net.get_devicelist()
+
+ def get_ib_hwaddrs_by_interface(self) -> dict:
+ return net.get_ib_hwaddrs_by_interface()
+
+ def get_ib_interface_hwaddr(
+ self, devname: DeviceName, ethernet_format: bool
+ ):
+ return net.get_ib_interface_hwaddr(devname, ethernet_format)
+
+ def get_interface_mac(self, devname: DeviceName):
+ return net.get_interface_mac(devname)
+
+ def get_interfaces(self) -> list:
+ return net.get_interfaces()
+
+ def get_interfaces_by_mac(self) -> dict:
+ return net.get_interfaces_by_mac()
+
+ def get_master(self, devname: DeviceName):
+ return net.get_master(devname)
+
+ def interface_has_own_mac(
+ self, devname: DeviceName, *, strict: bool = False
+ ) -> bool:
+ return net.interface_has_own_mac(devname, strict=strict)
+
+ def is_bond(self, devname: DeviceName) -> bool:
+ return net.is_bond(devname)
+
+ def is_bridge(self, devname: DeviceName) -> bool:
+ return net.is_bridge(devname)
+
+ @abc.abstractmethod
+ def is_physical(self, devname: DeviceName) -> bool:
+ """
+ Is ``devname`` a physical network device?
+
+ Examples of non-physical network devices: bonds, bridges, tunnels,
+ loopback devices.
+ """
+ pass
+
+ def is_renamed(self, devname: DeviceName) -> bool:
+ return net.is_renamed(devname)
+
+ def is_up(self, devname: DeviceName) -> bool:
+ return net.is_up(devname)
+
+ def is_vlan(self, devname: DeviceName) -> bool:
+ return net.is_vlan(devname)
+
+ def master_is_bridge_or_bond(self, devname: DeviceName) -> bool:
+ return net.master_is_bridge_or_bond(devname)
+
+ def wait_for_physdevs(
+ self, netcfg: NetworkConfig, *, strict: bool = True
+ ) -> None:
+ return net.wait_for_physdevs(netcfg, strict=strict)
+
+
+class BSDNetworking(Networking):
+ """Implementation of networking functionality shared across BSDs."""
+
+ def is_physical(self, devname: DeviceName) -> bool:
+ raise NotImplementedError()
+
+
+class LinuxNetworking(Networking):
+ """Implementation of networking functionality common to Linux distros."""
+
+ def get_dev_features(self, devname: DeviceName) -> str:
+ return net.get_dev_features(devname)
+
+ def has_netfail_standby_feature(self, devname: DeviceName) -> bool:
+ return net.has_netfail_standby_feature(devname)
+
+ def is_netfailover(self, devname: DeviceName) -> bool:
+ return net.is_netfailover(devname)
+
+ def is_netfail_master(self, devname: DeviceName) -> bool:
+ return net.is_netfail_master(devname)
+
+ def is_netfail_primary(self, devname: DeviceName) -> bool:
+ return net.is_netfail_primary(devname)
+
+ def is_netfail_standby(self, devname: DeviceName) -> bool:
+ return net.is_netfail_standby(devname)
+
+ def is_physical(self, devname: DeviceName) -> bool:
+ return os.path.exists(net.sys_dev_path(devname, "device"))
diff --git a/cloudinit/distros/openbsd.py b/cloudinit/distros/openbsd.py
index ca094156..720c9cf3 100644
--- a/cloudinit/distros/openbsd.py
+++ b/cloudinit/distros/openbsd.py
@@ -7,6 +7,7 @@ import platform
import cloudinit.distros.netbsd
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -27,7 +28,7 @@ class Distro(cloudinit.distros.netbsd.NetBSD):
def lock_passwd(self, name):
try:
- util.subp(['usermod', '-p', '*', name])
+ subp.subp(['usermod', '-p', '*', name])
except Exception:
util.logexc(LOG, "Failed to lock user %s", name)
raise
@@ -41,9 +42,10 @@ class Distro(cloudinit.distros.netbsd.NetBSD):
os_arch = platform.machine()
e = os.environ.copy()
e['PKG_PATH'] = (
- 'ftp://ftp.openbsd.org/pub/OpenBSD/{os_release}/'
- 'packages/{os_arch}/').format(
- os_arch=os_arch, os_release=os_release)
+ 'ftp://ftp.openbsd.org/pub/OpenBSD/{os_release}/'
+ 'packages/{os_arch}/').format(
+ os_arch=os_arch, os_release=os_release
+ )
return e
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index 68028d20..ffb7d0e8 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -14,6 +14,7 @@ from cloudinit.distros.parsers.hostname import HostnameConf
from cloudinit import helpers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.distros import rhel_util as rhutil
@@ -97,7 +98,7 @@ class Distro(distros.Distro):
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
def set_timezone(self, tz):
tz_file = self._find_tz_file(tz)
@@ -129,7 +130,7 @@ class Distro(distros.Distro):
if self.uses_systemd() and filename.endswith('/previous-hostname'):
return util.load_file(filename).strip()
elif self.uses_systemd():
- (out, _err) = util.subp(['hostname'])
+ (out, _err) = subp.subp(['hostname'])
if len(out):
return out
else:
@@ -163,7 +164,7 @@ class Distro(distros.Distro):
if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
util.write_file(out_fn, hostname)
elif self.uses_systemd():
- util.subp(['hostnamectl', 'set-hostname', str(hostname)])
+ subp.subp(['hostnamectl', 'set-hostname', str(hostname)])
else:
conf = None
try:
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index f55d96f7..c72f7c17 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -11,6 +11,7 @@
from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.distros import rhel_util
@@ -83,7 +84,7 @@ class Distro(distros.Distro):
if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
util.write_file(out_fn, hostname)
elif self.uses_systemd():
- util.subp(['hostnamectl', 'set-hostname', str(hostname)])
+ subp.subp(['hostnamectl', 'set-hostname', str(hostname)])
else:
host_cfg = {
'HOSTNAME': hostname,
@@ -108,7 +109,7 @@ class Distro(distros.Distro):
if self.uses_systemd() and filename.endswith('/previous-hostname'):
return util.load_file(filename).strip()
elif self.uses_systemd():
- (out, _err) = util.subp(['hostname'])
+ (out, _err) = subp.subp(['hostname'])
if len(out):
return out
else:
@@ -146,7 +147,7 @@ class Distro(distros.Distro):
if pkgs is None:
pkgs = []
- if util.which('dnf'):
+ if subp.which('dnf'):
LOG.debug('Using DNF for package management')
cmd = ['dnf']
else:
@@ -173,7 +174,7 @@ class Distro(distros.Distro):
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
diff --git a/cloudinit/distros/tests/test_init.py b/cloudinit/distros/tests/test_init.py
index 40939133..db534654 100644
--- a/cloudinit/distros/tests/test_init.py
+++ b/cloudinit/distros/tests/test_init.py
@@ -67,6 +67,9 @@ class TestGetPackageMirrorInfo:
assert {'primary': 'http://other'} == _get_package_mirror_info(
mirror_info, mirror_filter=lambda x: False)
+ @pytest.mark.parametrize('allow_ec2_mirror, platform_type', [
+ (True, 'ec2')
+ ])
@pytest.mark.parametrize('availability_zone,region,patterns,expected', (
# Test ec2_region alone
('fk-fake-1f', None, ['http://EC2-%(ec2_region)s/ubuntu'],
@@ -120,16 +123,34 @@ class TestGetPackageMirrorInfo:
['http://%(region)s/ubuntu'], ['http://fk-fake-1/ubuntu'])
for invalid_char in INVALID_URL_CHARS
))
- def test_substitution(self, availability_zone, region, patterns, expected):
+ def test_valid_substitution(self,
+ allow_ec2_mirror,
+ platform_type,
+ availability_zone,
+ region,
+ patterns,
+ expected):
"""Test substitution works as expected."""
+ flag_path = "cloudinit.distros." \
+ "ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES"
+
m_data_source = mock.Mock(
- availability_zone=availability_zone, region=region
+ availability_zone=availability_zone,
+ region=region,
+ platform_type=platform_type
)
mirror_info = {'search': {'primary': patterns}}
- ret = _get_package_mirror_info(
- mirror_info,
- data_source=m_data_source,
- mirror_filter=lambda x: x
- )
+ with mock.patch(flag_path, allow_ec2_mirror):
+ ret = _get_package_mirror_info(
+ mirror_info,
+ data_source=m_data_source,
+ mirror_filter=lambda x: x
+ )
+ print(allow_ec2_mirror)
+ print(platform_type)
+ print(availability_zone)
+ print(region)
+ print(patterns)
+ print(expected)
assert {'primary': expected} == ret
diff --git a/cloudinit/distros/tests/test_networking.py b/cloudinit/distros/tests/test_networking.py
new file mode 100644
index 00000000..2acb12f4
--- /dev/null
+++ b/cloudinit/distros/tests/test_networking.py
@@ -0,0 +1,42 @@
+from unittest import mock
+
+import pytest
+
+from cloudinit.distros.networking import BSDNetworking, LinuxNetworking
+
+
+@pytest.yield_fixture
+def sys_class_net(tmpdir):
+ sys_class_net_path = tmpdir.join("sys/class/net")
+ sys_class_net_path.ensure_dir()
+ with mock.patch(
+ "cloudinit.net.get_sys_class_path",
+ return_value=sys_class_net_path.strpath + "/",
+ ):
+ yield sys_class_net_path
+
+
+class TestBSDNetworkingIsPhysical:
+ def test_raises_notimplementederror(self):
+ with pytest.raises(NotImplementedError):
+ BSDNetworking().is_physical("eth0")
+
+
+class TestLinuxNetworkingIsPhysical:
+ def test_returns_false_by_default(self, sys_class_net):
+ assert not LinuxNetworking().is_physical("eth0")
+
+ def test_returns_false_if_devname_exists_but_not_physical(
+ self, sys_class_net
+ ):
+ devname = "eth0"
+ sys_class_net.join(devname).mkdir()
+ assert not LinuxNetworking().is_physical(devname)
+
+ def test_returns_true_if_device_is_physical(self, sys_class_net):
+ devname = "eth0"
+ device_dir = sys_class_net.join(devname)
+ device_dir.mkdir()
+ device_dir.join("device").write("")
+
+ assert LinuxNetworking().is_physical(devname)
diff --git a/cloudinit/features.py b/cloudinit/features.py
new file mode 100644
index 00000000..c44fa29e
--- /dev/null
+++ b/cloudinit/features.py
@@ -0,0 +1,44 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""
+Feature flags are used as a way to easily toggle configuration
+**at build time**. They are provided to accommodate feature deprecation and
+downstream configuration changes.
+
+Currently used upstream values for feature flags are set in
+``cloudinit/features.py``. Overrides to these values (typically set
+via quilt patch) can be placed in a file called
+``feature_overrides.py`` in the same directory. Any value set in
+``feature_overrides.py`` will override the original value set
+in ``features.py``.
+
+Each flag should include a short comment regarding the reason for
+the flag and intended lifetime.
+
+Tests are required for new feature flags, and tests must verify
+all valid states of a flag, not just the default state.
+"""
+
+ERROR_ON_USER_DATA_FAILURE = True
+"""
+If there is a failure in obtaining user data (i.e., #include or
+decompress fails), old behavior is to log a warning and proceed.
+After the 20.2 release, we instead raise an exception.
+This flag can be removed after Focal is no longer supported.
+"""
+
+
+ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES = False
+"""
+When configuring apt mirrors, the old behavior is to allow
+the use of EC2 mirrors if the datasource availability_zone format
+matches one of the possible AWS EC2 regions. After the 20.2 release, we
+no longer publish EC2 region mirror URLs on non-AWS cloud platforms.
+Besides feature_overrides.py, users can override this by providing
+#cloud-config apt directives.
+"""
+
+try:
+ # pylint: disable=wildcard-import
+ from cloudinit.feature_overrides import * # noqa
+except ImportError:
+ pass
diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py
index 7fe17a2e..72b5ac59 100644
--- a/cloudinit/gpg.py
+++ b/cloudinit/gpg.py
@@ -8,7 +8,7 @@
"""gpg.py - Collection of gpg key related functions"""
from cloudinit import log as logging
-from cloudinit import util
+from cloudinit import subp
import time
@@ -18,9 +18,9 @@ LOG = logging.getLogger(__name__)
def export_armour(key):
"""Export gpg key, armoured key gets returned"""
try:
- (armour, _) = util.subp(["gpg", "--export", "--armour", key],
+ (armour, _) = subp.subp(["gpg", "--export", "--armour", key],
capture=True)
- except util.ProcessExecutionError as error:
+ except subp.ProcessExecutionError as error:
# debug, since it happens for any key not on the system initially
LOG.debug('Failed to export armoured key "%s": %s', key, error)
armour = None
@@ -51,11 +51,11 @@ def recv_key(key, keyserver, retries=(1, 1)):
while True:
trynum += 1
try:
- util.subp(cmd, capture=True)
+ subp.subp(cmd, capture=True)
LOG.debug("Imported key '%s' from keyserver '%s' on try %d",
key, keyserver, trynum)
return
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
error = e
try:
naplen = next(sleeps)
@@ -72,9 +72,9 @@ def recv_key(key, keyserver, retries=(1, 1)):
def delete_key(key):
"""Delete the specified key from the local gpg ring"""
try:
- util.subp(["gpg", "--batch", "--yes", "--delete-keys", key],
+ subp.subp(["gpg", "--batch", "--yes", "--delete-keys", key],
capture=True)
- except util.ProcessExecutionError as error:
+ except subp.ProcessExecutionError as error:
LOG.warning('Failed delete key "%s": %s', key, error)
diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py
index dca50a49..c6205097 100644
--- a/cloudinit/handlers/boot_hook.py
+++ b/cloudinit/handlers/boot_hook.py
@@ -12,6 +12,7 @@ import os
from cloudinit import handlers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import (PER_ALWAYS)
@@ -48,8 +49,8 @@ class BootHookPartHandler(handlers.Handler):
env = os.environ.copy()
if self.instance_id is not None:
env['INSTANCE_ID'] = str(self.instance_id)
- util.subp([filepath], env=env)
- except util.ProcessExecutionError:
+ subp.subp([filepath], env=env)
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Boothooks script %s execution error", filepath)
except Exception:
util.logexc(LOG, "Boothooks unknown error when running %s",
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index 003cad60..a9d29537 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -13,6 +13,7 @@ import re
from cloudinit import handlers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import (PER_INSTANCE)
@@ -52,7 +53,7 @@ class UpstartJobPartHandler(handlers.Handler):
util.write_file(path, payload, 0o644)
if SUITABLE_UPSTART:
- util.subp(["initctl", "reload-configuration"], capture=False)
+ subp.subp(["initctl", "reload-configuration"], capture=False)
def _has_suitable_upstart():
@@ -63,7 +64,7 @@ def _has_suitable_upstart():
if not os.path.exists("/sbin/initctl"):
return False
try:
- (version_out, _err) = util.subp(["initctl", "version"])
+ (version_out, _err) = subp.subp(["initctl", "version"])
except Exception:
util.logexc(LOG, "initctl version failed")
return False
@@ -77,7 +78,7 @@ def _has_suitable_upstart():
if not os.path.exists("/usr/bin/dpkg-query"):
return False
try:
- (dpkg_ver, _err) = util.subp(["dpkg-query",
+ (dpkg_ver, _err) = subp.subp(["dpkg-query",
"--showformat=${Version}",
"--show", "upstart"], rcs=[0, 1])
except Exception:
@@ -86,9 +87,9 @@ def _has_suitable_upstart():
try:
good = "1.8-0ubuntu1.2"
- util.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good])
+ subp.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good])
return True
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code == 1:
pass
else:
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 8af24fa9..9d8c7ba9 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -12,6 +12,7 @@ import os
import re
from functools import partial
+from cloudinit import subp
from cloudinit import util
from cloudinit.net.network_state import mask_to_net_prefix
from cloudinit.url_helper import UrlError, readurl
@@ -98,10 +99,6 @@ def is_up(devname):
return read_sys_net_safe(devname, "operstate", translate=translate)
-def is_wireless(devname):
- return os.path.exists(sys_dev_path(devname, "wireless"))
-
-
def is_bridge(devname):
return os.path.exists(sys_dev_path(devname, "bridge"))
@@ -265,28 +262,6 @@ def is_vlan(devname):
return 'DEVTYPE=vlan' in uevent.splitlines()
-def is_connected(devname):
- # is_connected isn't really as simple as that. 2 is
- # 'physically connected'. 3 is 'not connected'. but a wlan interface will
- # always show 3.
- iflink = read_sys_net_safe(devname, "iflink")
- if iflink == "2":
- return True
- if not is_wireless(devname):
- return False
- LOG.debug("'%s' is wireless, basing 'connected' on carrier", devname)
- return read_sys_net_safe(devname, "carrier",
- translate={'0': False, '1': True})
-
-
-def is_physical(devname):
- return os.path.exists(sys_dev_path(devname, "device"))
-
-
-def is_present(devname):
- return os.path.exists(sys_dev_path(devname))
-
-
def device_driver(devname):
"""Return the device driver for net device named 'devname'."""
driver = None
@@ -358,7 +333,7 @@ def find_fallback_nic_on_freebsd(blacklist_drivers=None):
we'll use the first interface from ``ifconfig -l -u ether``
"""
- stdout, _stderr = util.subp(['ifconfig', '-l', '-u', 'ether'])
+ stdout, _stderr = subp.subp(['ifconfig', '-l', '-u', 'ether'])
values = stdout.split()
if values:
return values[0]
@@ -620,9 +595,9 @@ def _get_current_rename_info(check_downable=True):
if check_downable:
nmatch = re.compile(r"[0-9]+:\s+(\w+)[@:]")
- ipv6, _err = util.subp(['ip', '-6', 'addr', 'show', 'permanent',
+ ipv6, _err = subp.subp(['ip', '-6', 'addr', 'show', 'permanent',
'scope', 'global'], capture=True)
- ipv4, _err = util.subp(['ip', '-4', 'addr', 'show'], capture=True)
+ ipv4, _err = subp.subp(['ip', '-4', 'addr', 'show'], capture=True)
nics_with_addresses = set()
for bytes_out in (ipv6, ipv4):
@@ -658,13 +633,13 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
for data in cur_info.values())
def rename(cur, new):
- util.subp(["ip", "link", "set", cur, "name", new], capture=True)
+ subp.subp(["ip", "link", "set", cur, "name", new], capture=True)
def down(name):
- util.subp(["ip", "link", "set", name, "down"], capture=True)
+ subp.subp(["ip", "link", "set", name, "down"], capture=True)
def up(name):
- util.subp(["ip", "link", "set", name, "up"], capture=True)
+ subp.subp(["ip", "link", "set", name, "up"], capture=True)
ops = []
errors = []
@@ -819,7 +794,7 @@ def get_interfaces_by_mac():
def get_interfaces_by_mac_on_freebsd():
- (out, _) = util.subp(['ifconfig', '-a', 'ether'])
+ (out, _) = subp.subp(['ifconfig', '-a', 'ether'])
# flatten each interface block in a single line
def flatten(out):
@@ -848,9 +823,10 @@ def get_interfaces_by_mac_on_freebsd():
def get_interfaces_by_mac_on_netbsd():
ret = {}
re_field_match = (
- r"(?P<ifname>\w+).*address:\s"
- r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*")
- (out, _) = util.subp(['ifconfig', '-a'])
+ r"(?P<ifname>\w+).*address:\s"
+ r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*"
+ )
+ (out, _) = subp.subp(['ifconfig', '-a'])
if_lines = re.sub(r'\n\s+', ' ', out).splitlines()
for line in if_lines:
m = re.match(re_field_match, line)
@@ -865,7 +841,7 @@ def get_interfaces_by_mac_on_openbsd():
re_field_match = (
r"(?P<ifname>\w+).*lladdr\s"
r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*")
- (out, _) = util.subp(['ifconfig', '-a'])
+ (out, _) = subp.subp(['ifconfig', '-a'])
if_lines = re.sub(r'\n\s+', ' ', out).splitlines()
for line in if_lines:
m = re.match(re_field_match, line)
@@ -1067,11 +1043,11 @@ class EphemeralIPv4Network(object):
def __exit__(self, excp_type, excp_value, excp_traceback):
"""Teardown anything we set up."""
for cmd in self.cleanup_cmds:
- util.subp(cmd, capture=True)
+ subp.subp(cmd, capture=True)
def _delete_address(self, address, prefix):
"""Perform the ip command to remove the specified address."""
- util.subp(
+ subp.subp(
['ip', '-family', 'inet', 'addr', 'del',
'%s/%s' % (address, prefix), 'dev', self.interface],
capture=True)
@@ -1083,11 +1059,11 @@ class EphemeralIPv4Network(object):
'Attempting setup of ephemeral network on %s with %s brd %s',
self.interface, cidr, self.broadcast)
try:
- util.subp(
+ subp.subp(
['ip', '-family', 'inet', 'addr', 'add', cidr, 'broadcast',
self.broadcast, 'dev', self.interface],
capture=True, update_env={'LANG': 'C'})
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if "File exists" not in e.stderr:
raise
LOG.debug(
@@ -1095,7 +1071,7 @@ class EphemeralIPv4Network(object):
self.interface, self.ip)
else:
# Address creation success, bring up device and queue cleanup
- util.subp(
+ subp.subp(
['ip', '-family', 'inet', 'link', 'set', 'dev', self.interface,
'up'], capture=True)
self.cleanup_cmds.append(
@@ -1112,7 +1088,7 @@ class EphemeralIPv4Network(object):
via_arg = []
if gateway != "0.0.0.0/0":
via_arg = ['via', gateway]
- util.subp(
+ subp.subp(
['ip', '-4', 'route', 'add', net_address] + via_arg +
['dev', self.interface], capture=True)
self.cleanup_cmds.insert(
@@ -1122,20 +1098,20 @@ class EphemeralIPv4Network(object):
def _bringup_router(self):
"""Perform the ip commands to fully setup the router if needed."""
# Check if a default route exists and exit if it does
- out, _ = util.subp(['ip', 'route', 'show', '0.0.0.0/0'], capture=True)
+ out, _ = subp.subp(['ip', 'route', 'show', '0.0.0.0/0'], capture=True)
if 'default' in out:
LOG.debug(
'Skip ephemeral route setup. %s already has default route: %s',
self.interface, out.strip())
return
- util.subp(
+ subp.subp(
['ip', '-4', 'route', 'add', self.router, 'dev', self.interface,
'src', self.ip], capture=True)
self.cleanup_cmds.insert(
0,
['ip', '-4', 'route', 'del', self.router, 'dev', self.interface,
'src', self.ip])
- util.subp(
+ subp.subp(
['ip', '-4', 'route', 'add', 'default', 'via', self.router,
'dev', self.interface], capture=True)
self.cleanup_cmds.insert(
diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py
index fb714d4c..e34e0454 100644
--- a/cloudinit/net/bsd.py
+++ b/cloudinit/net/bsd.py
@@ -5,6 +5,7 @@ import re
from cloudinit import log as logging
from cloudinit import net
from cloudinit import util
+from cloudinit import subp
from cloudinit.distros.parsers.resolv_conf import ResolvConf
from cloudinit.distros import bsd_utils
@@ -18,11 +19,11 @@ class BSDRenderer(renderer.Renderer):
rc_conf_fn = 'etc/rc.conf'
def get_rc_config_value(self, key):
- fn = util.target_path(self.target, self.rc_conf_fn)
+ fn = subp.target_path(self.target, self.rc_conf_fn)
bsd_utils.get_rc_config_value(key, fn=fn)
def set_rc_config_value(self, key, value):
- fn = util.target_path(self.target, self.rc_conf_fn)
+ fn = subp.target_path(self.target, self.rc_conf_fn)
bsd_utils.set_rc_config_value(key, value, fn=fn)
def __init__(self, config=None):
@@ -65,8 +66,9 @@ class BSDRenderer(renderer.Renderer):
if subnet.get('type') == 'static':
if not subnet.get('netmask'):
LOG.debug(
- 'Skipping IP %s, because there is no netmask',
- subnet.get('address'))
+ 'Skipping IP %s, because there is no netmask',
+ subnet.get('address')
+ )
continue
LOG.debug('Configuring dev %s with %s / %s', device_name,
subnet.get('address'), subnet.get('netmask'))
@@ -111,12 +113,12 @@ class BSDRenderer(renderer.Renderer):
# Try to read the /etc/resolv.conf or just start from scratch if that
# fails.
try:
- resolvconf = ResolvConf(util.load_file(util.target_path(
+ resolvconf = ResolvConf(util.load_file(subp.target_path(
target, self.resolv_conf_fn)))
resolvconf.parse()
except IOError:
util.logexc(LOG, "Failed to parse %s, use new empty file",
- util.target_path(target, self.resolv_conf_fn))
+ subp.target_path(target, self.resolv_conf_fn))
resolvconf = ResolvConf('')
resolvconf.parse()
@@ -134,7 +136,7 @@ class BSDRenderer(renderer.Renderer):
except ValueError:
util.logexc(LOG, "Failed to add search domain %s", domain)
util.write_file(
- util.target_path(target, self.resolv_conf_fn),
+ subp.target_path(target, self.resolv_conf_fn),
str(resolvconf), 0o644)
def render_network_state(self, network_state, templates=None, target=None):
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 19d0199c..9e004688 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -17,6 +17,7 @@ from cloudinit.net import (
has_url_connectivity)
from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip
from cloudinit import temp_utils
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -39,10 +40,11 @@ class NoDHCPLeaseError(Exception):
class EphemeralDHCPv4(object):
- def __init__(self, iface=None, connectivity_url=None):
+ def __init__(self, iface=None, connectivity_url=None, dhcp_log_func=None):
self.iface = iface
self._ephipv4 = None
self.lease = None
+ self.dhcp_log_func = dhcp_log_func
self.connectivity_url = connectivity_url
def __enter__(self):
@@ -80,7 +82,8 @@ class EphemeralDHCPv4(object):
if self.lease:
return self.lease
try:
- leases = maybe_perform_dhcp_discovery(self.iface)
+ leases = maybe_perform_dhcp_discovery(
+ self.iface, self.dhcp_log_func)
except InvalidDHCPLeaseFileError:
raise NoDHCPLeaseError()
if not leases:
@@ -130,13 +133,15 @@ class EphemeralDHCPv4(object):
result[internal_mapping] = self.lease.get(different_names)
-def maybe_perform_dhcp_discovery(nic=None):
+def maybe_perform_dhcp_discovery(nic=None, dhcp_log_func=None):
"""Perform dhcp discovery if nic valid and dhclient command exists.
If the nic is invalid or undiscoverable or dhclient command is not found,
skip dhcp_discovery and return an empty dict.
@param nic: Name of the network interface we want to run dhclient on.
+ @param dhcp_log_func: A callable accepting the dhclient output and error
+ streams.
@return: A list of dicts representing dhcp options for each lease obtained
from the dhclient discovery if run, otherwise an empty list is
returned.
@@ -150,7 +155,7 @@ def maybe_perform_dhcp_discovery(nic=None):
LOG.debug(
'Skip dhcp_discovery: nic %s not found in get_devicelist.', nic)
return []
- dhclient_path = util.which('dhclient')
+ dhclient_path = subp.which('dhclient')
if not dhclient_path:
LOG.debug('Skip dhclient configuration: No dhclient command found.')
return []
@@ -158,7 +163,7 @@ def maybe_perform_dhcp_discovery(nic=None):
prefix='cloud-init-dhcp-',
needs_exe=True) as tdir:
# Use /var/tmp because /run/cloud-init/tmp is mounted noexec
- return dhcp_discovery(dhclient_path, nic, tdir)
+ return dhcp_discovery(dhclient_path, nic, tdir, dhcp_log_func)
def parse_dhcp_lease_file(lease_file):
@@ -192,13 +197,15 @@ def parse_dhcp_lease_file(lease_file):
return dhcp_leases
-def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
+def dhcp_discovery(dhclient_cmd_path, interface, cleandir, dhcp_log_func=None):
"""Run dhclient on the interface without scripts or filesystem artifacts.
@param dhclient_cmd_path: Full path to the dhclient used.
@param interface: Name of the network inteface on which to dhclient.
@param cleandir: The directory from which to run dhclient as well as store
dhcp leases.
+ @param dhcp_log_func: A callable accepting the dhclient output and error
+ streams.
@return: A list of dicts of representing the dhcp leases parsed from the
dhcp.leases file or empty list.
@@ -219,10 +226,10 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
# Generally dhclient relies on dhclient-script PREINIT action to bring the
# link up before attempting discovery. Since we are using -sf /bin/true,
# we need to do that "link up" ourselves first.
- util.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True)
+ subp.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True)
cmd = [sandbox_dhclient_cmd, '-1', '-v', '-lf', lease_file,
'-pf', pid_file, interface, '-sf', '/bin/true']
- util.subp(cmd, capture=True)
+ out, err = subp.subp(cmd, capture=True)
# Wait for pid file and lease file to appear, and for the process
# named by the pid file to daemonize (have pid 1 as its parent). If we
@@ -239,6 +246,7 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
return []
ppid = 'unknown'
+ daemonized = False
for _ in range(0, 1000):
pid_content = util.load_file(pid_file).strip()
try:
@@ -250,13 +258,17 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
if ppid == 1:
LOG.debug('killing dhclient with pid=%s', pid)
os.kill(pid, signal.SIGKILL)
- return parse_dhcp_lease_file(lease_file)
+ daemonized = True
+ break
time.sleep(0.01)
- LOG.error(
- 'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s seconds',
- pid_content, ppid, 0.01 * 1000
- )
+ if not daemonized:
+ LOG.error(
+ 'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s '
+ 'seconds', pid_content, ppid, 0.01 * 1000
+ )
+ if dhcp_log_func is not None:
+ dhcp_log_func(out, err)
return parse_dhcp_lease_file(lease_file)
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index 2f714563..b4c69457 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -11,6 +11,7 @@ from . import renderer
from .network_state import subnet_is_ipv6
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
@@ -511,13 +512,13 @@ class Renderer(renderer.Renderer):
return '\n\n'.join(['\n'.join(s) for s in sections]) + "\n"
def render_network_state(self, network_state, templates=None, target=None):
- fpeni = util.target_path(target, self.eni_path)
+ fpeni = subp.target_path(target, self.eni_path)
util.ensure_dir(os.path.dirname(fpeni))
header = self.eni_header if self.eni_header else ""
util.write_file(fpeni, header + self._render_interfaces(network_state))
if self.netrules_path:
- netrules = util.target_path(target, self.netrules_path)
+ netrules = subp.target_path(target, self.netrules_path)
util.ensure_dir(os.path.dirname(netrules))
util.write_file(netrules,
self._render_persistent_net(network_state))
@@ -544,9 +545,9 @@ def available(target=None):
expected = ['ifquery', 'ifup', 'ifdown']
search = ['/sbin', '/usr/sbin']
for p in expected:
- if not util.which(p, search=search, target=target):
+ if not subp.which(p, search=search, target=target):
return False
- eni = util.target_path(target, 'etc/network/interfaces')
+ eni = subp.target_path(target, 'etc/network/interfaces')
if not os.path.isfile(eni):
return False
diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py
index 60f05bb2..0285dfec 100644
--- a/cloudinit/net/freebsd.py
+++ b/cloudinit/net/freebsd.py
@@ -2,6 +2,7 @@
from cloudinit import log as logging
import cloudinit.net.bsd
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -30,17 +31,17 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
LOG.debug("freebsd generate postcmd disabled")
return
- util.subp(['service', 'netif', 'restart'], capture=True)
+ subp.subp(['service', 'netif', 'restart'], capture=True)
# On FreeBSD 10, the restart of routing and dhclient is likely to fail
# because
# - routing: it cannot remove the loopback route, but it will still set
# up the default route as expected.
# - dhclient: it cannot stop the dhclient started by the netif service.
# In both case, the situation is ok, and we can proceed.
- util.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1])
+ subp.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1])
for dhcp_interface in self.dhcp_interfaces():
- util.subp(['service', 'dhclient', 'restart', dhcp_interface],
+ subp.subp(['service', 'dhclient', 'restart', dhcp_interface],
rcs=[0, 1],
capture=True)
diff --git a/cloudinit/net/netbsd.py b/cloudinit/net/netbsd.py
index 9cc8ef31..71b38ee6 100644
--- a/cloudinit/net/netbsd.py
+++ b/cloudinit/net/netbsd.py
@@ -1,6 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
import cloudinit.net.bsd
@@ -16,8 +17,9 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
if self.dhcp_interfaces():
self.set_rc_config_value('dhcpcd', 'YES')
self.set_rc_config_value(
- 'dhcpcd_flags',
- ' '.join(self.dhcp_interfaces()))
+ 'dhcpcd_flags',
+ ' '.join(self.dhcp_interfaces())
+ )
for device_name, v in self.interface_configurations.items():
if isinstance(v, dict):
self.set_rc_config_value(
@@ -29,9 +31,9 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
LOG.debug("netbsd generate postcmd disabled")
return
- util.subp(['service', 'network', 'restart'], capture=True)
+ subp.subp(['service', 'network', 'restart'], capture=True)
if self.dhcp_interfaces():
- util.subp(['service', 'dhcpcd', 'restart'], capture=True)
+ subp.subp(['service', 'dhcpcd', 'restart'], capture=True)
def set_route(self, network, netmask, gateway):
if network == '0.0.0.0':
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 89855270..53347c83 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -8,6 +8,7 @@ from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2, IPV6_DYNAMIC_TYPES
from cloudinit import log as logging
from cloudinit import util
+from cloudinit import subp
from cloudinit import safeyaml
from cloudinit.net import SYS_CLASS_NET, get_devicelist
@@ -164,14 +165,14 @@ def _extract_bond_slaves_by_name(interfaces, entry, bond_master):
def _clean_default(target=None):
# clean out any known default files and derived files in target
# LP: #1675576
- tpath = util.target_path(target, "etc/netplan/00-snapd-config.yaml")
+ tpath = subp.target_path(target, "etc/netplan/00-snapd-config.yaml")
if not os.path.isfile(tpath):
return
content = util.load_file(tpath, decode=False)
if content != KNOWN_SNAPD_CONFIG:
return
- derived = [util.target_path(target, f) for f in (
+ derived = [subp.target_path(target, f) for f in (
'run/systemd/network/10-netplan-all-en.network',
'run/systemd/network/10-netplan-all-eth.network',
'run/systemd/generator/netplan.stamp')]
@@ -203,10 +204,10 @@ class Renderer(renderer.Renderer):
def features(self):
if self._features is None:
try:
- info_blob, _err = util.subp(self.NETPLAN_INFO, capture=True)
+ info_blob, _err = subp.subp(self.NETPLAN_INFO, capture=True)
info = util.load_yaml(info_blob)
self._features = info['netplan.io']['features']
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
# if the info subcommand is not present then we don't have any
# new features
pass
@@ -218,7 +219,7 @@ class Renderer(renderer.Renderer):
# check network state for version
# if v2, then extract network_state.config
# else render_v2_from_state
- fpnplan = os.path.join(util.target_path(target), self.netplan_path)
+ fpnplan = os.path.join(subp.target_path(target), self.netplan_path)
util.ensure_dir(os.path.dirname(fpnplan))
header = self.netplan_header if self.netplan_header else ""
@@ -239,7 +240,7 @@ class Renderer(renderer.Renderer):
if not run:
LOG.debug("netplan generate postcmd disabled")
return
- util.subp(self.NETPLAN_GENERATE, capture=True)
+ subp.subp(self.NETPLAN_GENERATE, capture=True)
def _net_setup_link(self, run=False):
"""To ensure device link properties are applied, we poke
@@ -253,7 +254,7 @@ class Renderer(renderer.Renderer):
for cmd in [setup_lnk + [SYS_CLASS_NET + iface]
for iface in get_devicelist() if
os.path.islink(SYS_CLASS_NET + iface)]:
- util.subp(cmd, capture=True)
+ subp.subp(cmd, capture=True)
def _render_content(self, network_state):
@@ -406,7 +407,7 @@ def available(target=None):
expected = ['netplan']
search = ['/usr/sbin', '/sbin']
for p in expected:
- if not util.which(p, search=search, target=target):
+ if not subp.which(p, search=search, target=target):
return False
return True
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 35c279f9..7bfe8be0 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -215,7 +215,7 @@ class NetworkState(object):
return (
route.get('prefix') == 0
and route.get('network') in default_nets
- )
+ )
class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
@@ -722,10 +722,10 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
item_params = dict((key, value) for (key, value) in
item_cfg.items() if key not in
NETWORK_V2_KEY_FILTER)
- # we accept the fixed spelling, but write the old for compatability
+ # we accept the fixed spelling, but write the old for compatibility
# Xenial does not have an updated netplan which supports the
# correct spelling. LP: #1756701
- params = item_params['parameters']
+ params = item_params.get('parameters', {})
grat_value = params.pop('gratuitous-arp', None)
if grat_value:
params['gratuitious-arp'] = grat_value
@@ -734,8 +734,7 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
'type': cmd_type,
'name': item_name,
cmd_type + '_interfaces': item_cfg.get('interfaces'),
- 'params': dict((v2key_to_v1[k], v) for k, v in
- item_params.get('parameters', {}).items())
+ 'params': dict((v2key_to_v1[k], v) for k, v in params.items())
}
if 'mtu' in item_cfg:
v1_cmd['mtu'] = item_cfg['mtu']
diff --git a/cloudinit/net/openbsd.py b/cloudinit/net/openbsd.py
index b9897e90..166d77e6 100644
--- a/cloudinit/net/openbsd.py
+++ b/cloudinit/net/openbsd.py
@@ -1,6 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
import cloudinit.net.bsd
@@ -12,14 +13,15 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
def write_config(self):
for device_name, v in self.interface_configurations.items():
if_file = 'etc/hostname.{}'.format(device_name)
- fn = util.target_path(self.target, if_file)
+ fn = subp.target_path(self.target, if_file)
if device_name in self.dhcp_interfaces():
content = 'dhcp\n'
elif isinstance(v, dict):
try:
content = "inet {address} {netmask}\n".format(
- address=v['address'],
- netmask=v['netmask'])
+ address=v['address'],
+ netmask=v['netmask']
+ )
except KeyError:
LOG.error(
"Invalid static configuration for %s",
@@ -30,12 +32,12 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
if not self._postcmds:
LOG.debug("openbsd generate postcmd disabled")
return
- util.subp(['sh', '/etc/netstart'], capture=True)
+ subp.subp(['sh', '/etc/netstart'], capture=True)
def set_route(self, network, netmask, gateway):
if network == '0.0.0.0':
if_file = 'etc/mygate'
- fn = util.target_path(self.target, if_file)
+ fn = subp.target_path(self.target, if_file)
content = gateway + '\n'
util.write_file(fn, content)
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 0a387377..0a5d481d 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -9,6 +9,7 @@ from configobj import ConfigObj
from cloudinit import log as logging
from cloudinit import util
+from cloudinit import subp
from cloudinit.distros.parsers import networkmanager_conf
from cloudinit.distros.parsers import resolv_conf
@@ -504,7 +505,7 @@ class Renderer(renderer.Renderer):
iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr
else:
iface_cfg['IPV6ADDR_SECONDARIES'] += \
- " " + ipv6_cidr
+ " " + ipv6_cidr
else:
ipv4_index = ipv4_index + 1
suff = "" if ipv4_index == 0 else str(ipv4_index)
@@ -858,19 +859,19 @@ class Renderer(renderer.Renderer):
if not templates:
templates = self.templates
file_mode = 0o644
- base_sysconf_dir = util.target_path(target, self.sysconf_dir)
+ base_sysconf_dir = subp.target_path(target, self.sysconf_dir)
for path, data in self._render_sysconfig(base_sysconf_dir,
network_state, self.flavor,
templates=templates).items():
util.write_file(path, data, file_mode)
if self.dns_path:
- dns_path = util.target_path(target, self.dns_path)
+ dns_path = subp.target_path(target, self.dns_path)
resolv_content = self._render_dns(network_state,
existing_dns_path=dns_path)
if resolv_content:
util.write_file(dns_path, resolv_content, file_mode)
if self.networkmanager_conf_path:
- nm_conf_path = util.target_path(target,
+ nm_conf_path = subp.target_path(target,
self.networkmanager_conf_path)
nm_conf_content = self._render_networkmanager_conf(network_state,
templates)
@@ -878,12 +879,12 @@ class Renderer(renderer.Renderer):
util.write_file(nm_conf_path, nm_conf_content, file_mode)
if self.netrules_path:
netrules_content = self._render_persistent_net(network_state)
- netrules_path = util.target_path(target, self.netrules_path)
+ netrules_path = subp.target_path(target, self.netrules_path)
util.write_file(netrules_path, netrules_content, file_mode)
if available_nm(target=target):
- enable_ifcfg_rh(util.target_path(target, path=NM_CFG_FILE))
+ enable_ifcfg_rh(subp.target_path(target, path=NM_CFG_FILE))
- sysconfig_path = util.target_path(target, templates.get('control'))
+ sysconfig_path = subp.target_path(target, templates.get('control'))
# Distros configuring /etc/sysconfig/network as a file e.g. Centos
if sysconfig_path.endswith('network'):
util.ensure_dir(os.path.dirname(sysconfig_path))
@@ -906,20 +907,20 @@ def available_sysconfig(target=None):
expected = ['ifup', 'ifdown']
search = ['/sbin', '/usr/sbin']
for p in expected:
- if not util.which(p, search=search, target=target):
+ if not subp.which(p, search=search, target=target):
return False
expected_paths = [
'etc/sysconfig/network-scripts/network-functions',
'etc/sysconfig/config']
for p in expected_paths:
- if os.path.isfile(util.target_path(target, p)):
+ if os.path.isfile(subp.target_path(target, p)):
return True
return False
def available_nm(target=None):
- if not os.path.isfile(util.target_path(target, path=NM_CFG_FILE)):
+ if not os.path.isfile(subp.target_path(target, path=NM_CFG_FILE)):
return False
return True
diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py
index 7768da7c..7987f9d1 100644
--- a/cloudinit/net/tests/test_dhcp.py
+++ b/cloudinit/net/tests/test_dhcp.py
@@ -266,7 +266,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
'Skip dhcp_discovery: nic idontexist not found in get_devicelist.',
self.logs.getvalue())
- @mock.patch('cloudinit.net.dhcp.util.which')
+ @mock.patch('cloudinit.net.dhcp.subp.which')
@mock.patch('cloudinit.net.dhcp.find_fallback_nic')
def test_absent_dhclient_command(self, m_fallback, m_which):
"""When dhclient doesn't exist in the OS, log the issue and no-op."""
@@ -279,7 +279,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
@mock.patch('cloudinit.temp_utils.os.getuid')
@mock.patch('cloudinit.net.dhcp.dhcp_discovery')
- @mock.patch('cloudinit.net.dhcp.util.which')
+ @mock.patch('cloudinit.net.dhcp.subp.which')
@mock.patch('cloudinit.net.dhcp.find_fallback_nic')
def test_dhclient_run_with_tmpdir(self, m_fback, m_which, m_dhcp, m_uid):
"""maybe_perform_dhcp_discovery passes tmpdir to dhcp_discovery."""
@@ -302,13 +302,14 @@ class TestDHCPDiscoveryClean(CiTestCase):
@mock.patch('time.sleep', mock.MagicMock())
@mock.patch('cloudinit.net.dhcp.os.kill')
- @mock.patch('cloudinit.net.dhcp.util.subp')
+ @mock.patch('cloudinit.net.dhcp.subp.subp')
def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid(self, m_subp,
m_kill):
"""dhcp_discovery logs a warning when pidfile contains invalid content.
Lease processing still occurs and no proc kill is attempted.
"""
+ m_subp.return_value = ('', '')
tmpdir = self.tmp_dir()
dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
script_content = '#!/bin/bash\necho fake-dhclient'
@@ -337,13 +338,14 @@ class TestDHCPDiscoveryClean(CiTestCase):
@mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
@mock.patch('cloudinit.net.dhcp.os.kill')
@mock.patch('cloudinit.net.dhcp.util.wait_for_files')
- @mock.patch('cloudinit.net.dhcp.util.subp')
+ @mock.patch('cloudinit.net.dhcp.subp.subp')
def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(self,
m_subp,
m_wait,
m_kill,
m_getppid):
"""dhcp_discovery waits for the presence of pidfile and dhcp.leases."""
+ m_subp.return_value = ('', '')
tmpdir = self.tmp_dir()
dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
script_content = '#!/bin/bash\necho fake-dhclient'
@@ -364,12 +366,13 @@ class TestDHCPDiscoveryClean(CiTestCase):
@mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
@mock.patch('cloudinit.net.dhcp.os.kill')
- @mock.patch('cloudinit.net.dhcp.util.subp')
+ @mock.patch('cloudinit.net.dhcp.subp.subp')
def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid):
"""dhcp_discovery brings up the interface and runs dhclient.
It also returns the parsed dhcp.leases file generated in the sandbox.
"""
+ m_subp.return_value = ('', '')
tmpdir = self.tmp_dir()
dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
script_content = '#!/bin/bash\necho fake-dhclient'
@@ -406,6 +409,41 @@ class TestDHCPDiscoveryClean(CiTestCase):
'eth9', '-sf', '/bin/true'], capture=True)])
m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
+ @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
+ @mock.patch('cloudinit.net.dhcp.os.kill')
+ @mock.patch('cloudinit.net.dhcp.subp.subp')
+ def test_dhcp_output_error_stream(self, m_subp, m_kill, m_getppid):
+ """"dhcp_log_func is called with the output and error streams of
+ dhclinet when the callable is passed."""
+ dhclient_err = 'FAKE DHCLIENT ERROR'
+ dhclient_out = 'FAKE DHCLIENT OUT'
+ m_subp.return_value = (dhclient_out, dhclient_err)
+ tmpdir = self.tmp_dir()
+ dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
+ script_content = '#!/bin/bash\necho fake-dhclient'
+ write_file(dhclient_script, script_content, mode=0o755)
+ lease_content = dedent("""
+ lease {
+ interface "eth9";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """)
+ lease_file = os.path.join(tmpdir, 'dhcp.leases')
+ write_file(lease_file, lease_content)
+ pid_file = os.path.join(tmpdir, 'dhclient.pid')
+ my_pid = 1
+ write_file(pid_file, "%d\n" % my_pid)
+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized
+
+ def dhcp_log_func(out, err):
+ self.assertEqual(out, dhclient_out)
+ self.assertEqual(err, dhclient_err)
+
+ dhcp_discovery(
+ dhclient_script, 'eth9', tmpdir, dhcp_log_func=dhcp_log_func)
+
class TestSystemdParseLeases(CiTestCase):
@@ -529,7 +567,7 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
# Ensure that no teardown happens:
m_dhcp.assert_not_called()
- @mock.patch('cloudinit.net.dhcp.util.subp')
+ @mock.patch('cloudinit.net.dhcp.subp.subp')
@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
def test_ephemeral_dhcp_setup_network_if_url_connectivity(
self, m_dhcp, m_subp):
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index 835ed807..eb458c39 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -14,7 +14,8 @@ import requests
import cloudinit.net as net
from cloudinit import safeyaml as yaml
from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase
-from cloudinit.util import ProcessExecutionError, ensure_file, write_file
+from cloudinit.subp import ProcessExecutionError
+from cloudinit.util import ensure_file, write_file
class TestSysDevPath(CiTestCase):
@@ -142,12 +143,6 @@ class TestReadSysNet(CiTestCase):
write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state)
self.assertFalse(net.is_up('eth0'))
- def test_is_wireless(self):
- """is_wireless is True when /sys/net/devname/wireless exists."""
- self.assertFalse(net.is_wireless('eth0'))
- ensure_file(os.path.join(self.sysdir, 'eth0', 'wireless'))
- self.assertTrue(net.is_wireless('eth0'))
-
def test_is_bridge(self):
"""is_bridge is True when /sys/net/devname/bridge exists."""
self.assertFalse(net.is_bridge('eth0'))
@@ -203,32 +198,6 @@ class TestReadSysNet(CiTestCase):
write_file(os.path.join(self.sysdir, 'eth0', 'uevent'), content)
self.assertTrue(net.is_vlan('eth0'))
- def test_is_connected_when_physically_connected(self):
- """is_connected is True when /sys/net/devname/iflink reports 2."""
- self.assertFalse(net.is_connected('eth0'))
- write_file(os.path.join(self.sysdir, 'eth0', 'iflink'), "2")
- self.assertTrue(net.is_connected('eth0'))
-
- def test_is_connected_when_wireless_and_carrier_active(self):
- """is_connected is True if wireless /sys/net/devname/carrier is 1."""
- self.assertFalse(net.is_connected('eth0'))
- ensure_file(os.path.join(self.sysdir, 'eth0', 'wireless'))
- self.assertFalse(net.is_connected('eth0'))
- write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), "1")
- self.assertTrue(net.is_connected('eth0'))
-
- def test_is_physical(self):
- """is_physical is True when /sys/net/devname/device exists."""
- self.assertFalse(net.is_physical('eth0'))
- ensure_file(os.path.join(self.sysdir, 'eth0', 'device'))
- self.assertTrue(net.is_physical('eth0'))
-
- def test_is_present(self):
- """is_present is True when /sys/net/devname exists."""
- self.assertFalse(net.is_present('eth0'))
- ensure_file(os.path.join(self.sysdir, 'eth0', 'device'))
- self.assertTrue(net.is_present('eth0'))
-
class TestGenerateFallbackConfig(CiTestCase):
@@ -541,7 +510,7 @@ class TestInterfaceHasOwnMAC(CiTestCase):
net.interface_has_own_mac('eth1', strict=True)
-@mock.patch('cloudinit.net.util.subp')
+@mock.patch('cloudinit.net.subp.subp')
class TestEphemeralIPV4Network(CiTestCase):
with_logs = True
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 1001f149..628e2908 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -13,6 +13,7 @@ import re
from cloudinit import log as logging
from cloudinit.net.network_state import net_prefix_to_ipv4_mask
+from cloudinit import subp
from cloudinit import util
from cloudinit.simpletable import SimpleTable
@@ -197,15 +198,15 @@ def _netdev_info_ifconfig(ifconfig_data):
def netdev_info(empty=""):
devs = {}
if util.is_NetBSD():
- (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1])
+ (ifcfg_out, _err) = subp.subp(["ifconfig", "-a"], rcs=[0, 1])
devs = _netdev_info_ifconfig_netbsd(ifcfg_out)
- elif util.which('ip'):
+ elif subp.which('ip'):
# Try iproute first of all
- (ipaddr_out, _err) = util.subp(["ip", "addr", "show"])
+ (ipaddr_out, _err) = subp.subp(["ip", "addr", "show"])
devs = _netdev_info_iproute(ipaddr_out)
- elif util.which('ifconfig'):
+ elif subp.which('ifconfig'):
# Fall back to net-tools if iproute2 is not present
- (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1])
+ (ifcfg_out, _err) = subp.subp(["ifconfig", "-a"], rcs=[0, 1])
devs = _netdev_info_ifconfig(ifcfg_out)
else:
LOG.warning(
@@ -285,10 +286,10 @@ def _netdev_route_info_iproute(iproute_data):
entry['flags'] = ''.join(flags)
routes['ipv4'].append(entry)
try:
- (iproute_data6, _err6) = util.subp(
+ (iproute_data6, _err6) = subp.subp(
["ip", "--oneline", "-6", "route", "list", "table", "all"],
rcs=[0, 1])
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
else:
entries6 = iproute_data6.splitlines()
@@ -357,9 +358,9 @@ def _netdev_route_info_netstat(route_data):
routes['ipv4'].append(entry)
try:
- (route_data6, _err6) = util.subp(
+ (route_data6, _err6) = subp.subp(
["netstat", "-A", "inet6", "--route", "--numeric"], rcs=[0, 1])
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
else:
entries6 = route_data6.splitlines()
@@ -393,13 +394,13 @@ def _netdev_route_info_netstat(route_data):
def route_info():
routes = {}
- if util.which('ip'):
+ if subp.which('ip'):
# Try iproute first of all
- (iproute_out, _err) = util.subp(["ip", "-o", "route", "list"])
+ (iproute_out, _err) = subp.subp(["ip", "-o", "route", "list"])
routes = _netdev_route_info_iproute(iproute_out)
- elif util.which('netstat'):
+ elif subp.which('netstat'):
# Fall back to net-tools if iproute2 is not present
- (route_out, _err) = util.subp(
+ (route_out, _err) = subp.subp(
["netstat", "--route", "--numeric", "--extend"], rcs=[0, 1])
routes = _netdev_route_info_netstat(route_out)
else:
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 946df7e0..6b9127b6 100755
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -139,7 +139,8 @@ class HyperVKvpReportingHandler(ReportingHandler):
self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
self.incarnation_no)
self.publish_thread = threading.Thread(
- target=self._publish_event_routine)
+ target=self._publish_event_routine
+ )
self.publish_thread.daemon = True
self.publish_thread.start()
@@ -202,10 +203,15 @@ class HyperVKvpReportingHandler(ReportingHandler):
uuid.uuid4())
def _encode_kvp_item(self, key, value):
- data = (struct.pack("%ds%ds" % (
+ data = struct.pack(
+ "%ds%ds"
+ % (
self.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
- self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
- key.encode('utf-8'), value.encode('utf-8')))
+ self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE,
+ ),
+ key.encode("utf-8"),
+ value.encode("utf-8"),
+ )
return data
def _decode_kvp_item(self, record_data):
@@ -219,7 +225,7 @@ class HyperVKvpReportingHandler(ReportingHandler):
v = (
record_data[
self.HV_KVP_EXCHANGE_MAX_KEY_SIZE:self.HV_KVP_RECORD_SIZE
- ].decode('utf-8').strip('\x00'))
+ ].decode('utf-8').strip('\x00'))
return {'key': k, 'value': v}
@@ -265,11 +271,11 @@ class HyperVKvpReportingHandler(ReportingHandler):
"""
key = self._event_key(event)
meta_data = {
- "name": event.name,
- "type": event.event_type,
- "ts": (datetime.utcfromtimestamp(event.timestamp)
- .isoformat() + 'Z'),
- }
+ "name": event.name,
+ "type": event.event_type,
+ "ts": (datetime.utcfromtimestamp(event.timestamp)
+ .isoformat() + 'Z'),
+ }
if hasattr(event, self.RESULT_KEY):
meta_data[self.RESULT_KEY] = event.result
meta_data[self.MSG_KEY] = event.description
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 5270fda8..ac3ecc3d 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -18,9 +18,9 @@ import os.path
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
-from cloudinit.util import ProcessExecutionError
LOG = logging.getLogger(__name__)
@@ -192,7 +192,7 @@ class DataSourceAltCloud(sources.DataSource):
# modprobe floppy
try:
modprobe_floppy()
- except ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
util.logexc(LOG, 'Failed modprobe: %s', e)
return False
@@ -201,7 +201,7 @@ class DataSourceAltCloud(sources.DataSource):
# udevadm settle for floppy device
try:
util.udevadm_settle(exists=floppy_dev, timeout=5)
- except (ProcessExecutionError, OSError) as e:
+ except (subp.ProcessExecutionError, OSError) as e:
util.logexc(LOG, 'Failed udevadm_settle: %s\n', e)
return False
@@ -261,7 +261,7 @@ class DataSourceAltCloud(sources.DataSource):
def modprobe_floppy():
- out, _err = util.subp(CMD_PROBE_FLOPPY)
+ out, _err = subp.subp(CMD_PROBE_FLOPPY)
LOG.debug('Command: %s\nOutput%s', ' '.join(CMD_PROBE_FLOPPY), out)
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 61ec522a..41431a7a 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -22,6 +22,7 @@ from cloudinit.event import EventType
from cloudinit.net.dhcp import EphemeralDHCPv4
from cloudinit import sources
from cloudinit.sources.helpers import netlink
+from cloudinit import subp
from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
from cloudinit import util
from cloudinit.reporting import events
@@ -34,7 +35,8 @@ from cloudinit.sources.helpers.azure import (
get_system_info,
report_diagnostic_event,
EphemeralDHCPv4WithReporting,
- is_byte_swapped)
+ is_byte_swapped,
+ dhcp_log_cb)
LOG = logging.getLogger(__name__)
@@ -139,8 +141,8 @@ def find_dev_from_busdev(camcontrol_out, busdev):
def execute_or_debug(cmd, fail_ret=None):
try:
- return util.subp(cmd)[0]
- except util.ProcessExecutionError:
+ return subp.subp(cmd)[0]
+ except subp.ProcessExecutionError:
LOG.debug("Failed to execute: %s", ' '.join(cmd))
return fail_ret
@@ -252,11 +254,11 @@ DEF_PASSWD_REDACTION = 'REDACTED'
def get_hostname(hostname_command='hostname'):
if not isinstance(hostname_command, (list, tuple)):
hostname_command = (hostname_command,)
- return util.subp(hostname_command, capture=True)[0].strip()
+ return subp.subp(hostname_command, capture=True)[0].strip()
def set_hostname(hostname, hostname_command='hostname'):
- util.subp([hostname_command, hostname])
+ subp.subp([hostname_command, hostname])
@azure_ds_telemetry_reporter
@@ -343,7 +345,7 @@ class DataSourceAzure(sources.DataSource):
try:
invoke_agent(agent_cmd)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
# claim the datasource even if the command failed
util.logexc(LOG, "agent command '%s' failed.",
self.ds_cfg['agent_command'])
@@ -522,8 +524,9 @@ class DataSourceAzure(sources.DataSource):
try:
crawled_data = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self.crawl_metadata)
+ logfunc=LOG.debug, msg='Crawl of metadata service',
+ func=self.crawl_metadata
+ )
except sources.InvalidMetaDataException as e:
LOG.warning('Could not crawl Azure metadata: %s', e)
return False
@@ -596,25 +599,35 @@ class DataSourceAzure(sources.DataSource):
return_val = None
def exc_cb(msg, exception):
- if isinstance(exception, UrlError) and exception.code == 404:
- if self.imds_poll_counter == self.imds_logging_threshold:
- # Reducing the logging frequency as we are polling IMDS
- self.imds_logging_threshold *= 2
- LOG.debug("Call to IMDS with arguments %s failed "
- "with status code %s after %s retries",
- msg, exception.code, self.imds_poll_counter)
- LOG.debug("Backing off logging threshold for the same "
- "exception to %d", self.imds_logging_threshold)
- self.imds_poll_counter += 1
- return True
-
- # If we get an exception while trying to call IMDS, we
- # call DHCP and setup the ephemeral network to acquire the new IP.
- LOG.debug("Call to IMDS with arguments %s failed with "
- "status code %s", msg, exception.code)
- report_diagnostic_event("polling IMDS failed with exception %s"
- % exception.code)
- return False
+ if isinstance(exception, UrlError):
+ if exception.code in (404, 410):
+ if self.imds_poll_counter == self.imds_logging_threshold:
+ # Reducing the logging frequency as we are polling IMDS
+ self.imds_logging_threshold *= 2
+ LOG.debug("Call to IMDS with arguments %s failed "
+ "with status code %s after %s retries",
+ msg, exception.code, self.imds_poll_counter)
+ LOG.debug("Backing off logging threshold for the same "
+ "exception to %d",
+ self.imds_logging_threshold)
+ report_diagnostic_event("poll IMDS with %s failed. "
+ "Exception: %s and code: %s" %
+ (msg, exception.cause,
+ exception.code))
+ self.imds_poll_counter += 1
+ return True
+ else:
+ # If we get an exception while trying to call IMDS, we call
+ # DHCP and setup the ephemeral network to acquire a new IP.
+ report_diagnostic_event("poll IMDS with %s failed. "
+ "Exception: %s and code: %s" %
+ (msg, exception.cause,
+ exception.code))
+ return False
+
+ LOG.debug("poll IMDS failed with an unexpected exception: %s",
+ exception)
+ return False
LOG.debug("Wait for vnetswitch to happen")
while True:
@@ -624,7 +637,8 @@ class DataSourceAzure(sources.DataSource):
name="obtain-dhcp-lease",
description="obtain dhcp lease",
parent=azure_ds_reporter):
- self._ephemeral_dhcp_ctx = EphemeralDHCPv4()
+ self._ephemeral_dhcp_ctx = EphemeralDHCPv4(
+ dhcp_log_func=dhcp_log_cb)
lease = self._ephemeral_dhcp_ctx.obtain_lease()
if vnet_switched:
@@ -882,9 +896,10 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
(cand_part, cand_path, devpath))
with events.ReportEventStack(
- name="mount-ntfs-and-count",
- description="mount-ntfs-and-count",
- parent=azure_ds_reporter) as evt:
+ name="mount-ntfs-and-count",
+ description="mount-ntfs-and-count",
+ parent=azure_ds_reporter
+ ) as evt:
try:
file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
update_env_for_mount={'LANG': 'C'})
@@ -913,9 +928,10 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
# wait for ephemeral disk to come up
naplen = .2
with events.ReportEventStack(
- name="wait-for-ephemeral-disk",
- description="wait for ephemeral disk",
- parent=azure_ds_reporter):
+ name="wait-for-ephemeral-disk",
+ description="wait for ephemeral disk",
+ parent=azure_ds_reporter
+ ):
missing = util.wait_for_files([devpath],
maxwait=maxwait,
naplen=naplen,
@@ -972,7 +988,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
if command == "builtin":
if util.is_FreeBSD():
command = BOUNCE_COMMAND_FREEBSD
- elif util.which('ifup'):
+ elif subp.which('ifup'):
command = BOUNCE_COMMAND_IFUP
else:
LOG.debug(
@@ -983,7 +999,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
shell = not isinstance(command, (list, tuple))
# capture=False, see comments in bug 1202758 and bug 1206164.
util.log_time(logfunc=LOG.debug, msg="publishing hostname",
- get_uptime=True, func=util.subp,
+ get_uptime=True, func=subp.subp,
kwargs={'args': command, 'shell': shell, 'capture': False,
'env': env})
return True
@@ -993,7 +1009,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
def crtfile_to_pubkey(fname, data=None):
pipeline = ('openssl x509 -noout -pubkey < "$0" |'
'ssh-keygen -i -m PKCS8 -f /dev/stdin')
- (out, _err) = util.subp(['sh', '-c', pipeline, fname],
+ (out, _err) = subp.subp(['sh', '-c', pipeline, fname],
capture=True, data=data)
return out.rstrip()
@@ -1005,7 +1021,7 @@ def pubkeys_from_crt_files(flist):
for fname in flist:
try:
pubkeys.append(crtfile_to_pubkey(fname))
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
errors.append(fname)
if errors:
@@ -1047,7 +1063,7 @@ def invoke_agent(cmd):
# this is a function itself to simplify patching it for test
if cmd:
LOG.debug("invoking agent: %s", cmd)
- util.subp(cmd, shell=(not isinstance(cmd, list)))
+ subp.subp(cmd, shell=(not isinstance(cmd, list)))
else:
LOG.debug("not invoking agent")
@@ -1323,9 +1339,10 @@ def parse_network_config(imds_metadata):
@return: Dictionary containing network version 2 standard configuration.
"""
with events.ReportEventStack(
- name="parse_network_config",
- description="",
- parent=azure_ds_reporter) as evt:
+ name="parse_network_config",
+ description="",
+ parent=azure_ds_reporter
+ ) as evt:
if imds_metadata != sources.UNSET and imds_metadata:
netconfig = {'version': 2, 'ethernets': {}}
LOG.debug('Azure: generating network configuration from IMDS')
@@ -1469,9 +1486,10 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
def _is_platform_viable(seed_dir):
with events.ReportEventStack(
- name="check-platform-viability",
- description="found azure asset tag",
- parent=azure_ds_reporter) as evt:
+ name="check-platform-viability",
+ description="found azure asset tag",
+ parent=azure_ds_reporter
+ ) as evt:
"""Check platform environment to report if this datasource may run."""
asset_tag = util.read_dmi_data('chassis-asset-tag')
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 2013bed7..54810439 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -22,6 +22,7 @@ from cloudinit import log as logging
from cloudinit.net import dhcp
from cloudinit import sources
from cloudinit import url_helper as uhelp
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -46,7 +47,7 @@ class CloudStackPasswordServerClient(object):
# The password server was in the past, a broken HTTP server, but is now
# fixed. wget handles this seamlessly, so it's easier to shell out to
# that rather than write our own handling code.
- output, _ = util.subp([
+ output, _ = subp.subp([
'wget', '--quiet', '--tries', '3', '--timeout', '20',
'--output-document', '-', '--header',
'DomU_Request: {0}'.format(domu_request),
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index ae31934b..62756cf7 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -10,6 +10,7 @@ import os
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
from cloudinit.net import eni
@@ -245,7 +246,7 @@ def find_candidate_devs(probe_optical=True, dslist=None):
for device in OPTICAL_DEVICES:
try:
util.find_devs_with(path=device)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
by_fstype = []
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index e0ef665e..5040ce5b 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -58,7 +58,7 @@ class DataSourceDigitalOcean(sources.DataSource):
ipv4LL_nic = None
if self.use_ip4LL:
- ipv4LL_nic = do_helper.assign_ipv4_link_local()
+ ipv4LL_nic = do_helper.assign_ipv4_link_local(self.distro)
md = do_helper.read_metadata(
self.metadata_address, timeout=self.timeout,
diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py
index 50298330..70e4274c 100644
--- a/cloudinit/sources/DataSourceHetzner.py
+++ b/cloudinit/sources/DataSourceHetzner.py
@@ -59,7 +59,14 @@ class DataSourceHetzner(sources.DataSource):
self.userdata_address, timeout=self.timeout,
sec_between=self.wait_retry, retries=self.retries)
- self.userdata_raw = ud
+ # Hetzner cloud does not support binary user-data. So here, do a
+ # base64 decode of the data if we can. The end result being that a
+ # user can provide base64 encoded (possibly gzipped) data as user-data.
+ #
+ # The fallout is that in the event of b64 encoded user-data,
+ # /var/lib/cloud-init/cloud-config.txt will not be identical to the
+ # user-data provided. It will be decoded.
+ self.userdata_raw = hc_helper.maybe_b64decode(ud)
self.metadata_full = md
"""hostname is name provided by user at launch. The API enforces
diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index e0c714e8..d2aa9a58 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -99,6 +99,7 @@ import os
from cloudinit import log as logging
from cloudinit import sources
from cloudinit.sources.helpers import openstack
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -240,7 +241,7 @@ def get_ibm_platform():
fslabels = {}
try:
devs = util.blkid()
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
LOG.warning("Failed to run blkid: %s", e)
return (None, None)
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 41f999e3..4c5eee4f 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -16,6 +16,7 @@ from xml.dom import minidom
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
from cloudinit.sources.helpers.vmware.imc.config \
import Config
@@ -536,15 +537,15 @@ def transport_iso9660(require_iso=True):
def transport_vmware_guestinfo():
rpctool = "vmware-rpctool"
not_found = None
- if not util.which(rpctool):
+ if not subp.which(rpctool):
return not_found
cmd = [rpctool, "info-get guestinfo.ovfEnv"]
try:
- out, _err = util.subp(cmd)
+ out, _err = subp.subp(cmd)
if out:
return out
LOG.debug("cmd %s exited 0 with empty stdout: %s", cmd, out)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
LOG.warning("%s exited with code %d", rpctool, e.exit_code)
LOG.debug(e)
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index a08ab404..12b1f94f 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -13,6 +13,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import collections
+import functools
import os
import pwd
import re
@@ -21,6 +22,7 @@ import string
from cloudinit import log as logging
from cloudinit import net
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
@@ -59,10 +61,19 @@ class DataSourceOpenNebula(sources.DataSource):
for cdev in candidates:
try:
if os.path.isdir(self.seed_dir):
- results = read_context_disk_dir(cdev, asuser=parseuser)
+ results = read_context_disk_dir(
+ cdev, self.distro, asuser=parseuser
+ )
elif cdev.startswith("/dev"):
- results = util.mount_cb(cdev, read_context_disk_dir,
- data=parseuser)
+ # util.mount_cb only handles passing a single argument
+ # through to the wrapped function, so we have to partially
+ # apply the function to pass in `distro`. See LP: #1884979
+ partially_applied_func = functools.partial(
+ read_context_disk_dir,
+ asuser=parseuser,
+ distro=self.distro,
+ )
+ results = util.mount_cb(cdev, partially_applied_func)
except NonContextDiskDir:
continue
except BrokenContextDiskDir as exc:
@@ -128,10 +139,10 @@ class BrokenContextDiskDir(Exception):
class OpenNebulaNetwork(object):
- def __init__(self, context, system_nics_by_mac=None):
+ def __init__(self, context, distro, system_nics_by_mac=None):
self.context = context
if system_nics_by_mac is None:
- system_nics_by_mac = get_physical_nics_by_mac()
+ system_nics_by_mac = get_physical_nics_by_mac(distro)
self.ifaces = collections.OrderedDict(
[k for k in sorted(system_nics_by_mac.items(),
key=lambda k: net.natural_sort_key(k[1]))])
@@ -334,7 +345,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
cmd.extend(bash)
- (output, _error) = util.subp(cmd, data=bcmd)
+ (output, _error) = subp.subp(cmd, data=bcmd)
# exclude vars in bash that change on their own or that we used
excluded = (
@@ -366,7 +377,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
return ret
-def read_context_disk_dir(source_dir, asuser=None):
+def read_context_disk_dir(source_dir, distro, asuser=None):
"""
read_context_disk_dir(source_dir):
read source_dir and return a tuple with metadata dict and user-data
@@ -396,7 +407,7 @@ def read_context_disk_dir(source_dir, asuser=None):
path = os.path.join(source_dir, 'context.sh')
content = util.load_file(path)
context = parse_shell_config(content, asuser=asuser)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
raise BrokenContextDiskDir("Error processing context.sh: %s" % (e))
except IOError as e:
raise NonContextDiskDir("Error reading context.sh: %s" % (e))
@@ -449,15 +460,17 @@ def read_context_disk_dir(source_dir, asuser=None):
# http://docs.opennebula.org/5.4/operation/references/template.html#context-section
ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP.*$', k)]
if ipaddr_keys:
- onet = OpenNebulaNetwork(context)
+ onet = OpenNebulaNetwork(context, distro)
results['network-interfaces'] = onet.gen_conf()
return results
-def get_physical_nics_by_mac():
+def get_physical_nics_by_mac(distro):
devs = net.get_interfaces_by_mac()
- return dict([(m, n) for m, n in devs.items() if net.is_physical(n)])
+ return dict(
+ [(m, n) for m, n in devs.items() if distro.networking.is_physical(n)]
+ )
# Legacy: Must be present in case we load an old pkl object
diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py
index 084cb7d5..e064c8d6 100644
--- a/cloudinit/sources/DataSourceRbxCloud.py
+++ b/cloudinit/sources/DataSourceRbxCloud.py
@@ -15,6 +15,7 @@ import os.path
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
from cloudinit.event import EventType
@@ -43,11 +44,11 @@ def int2ip(addr):
def _sub_arp(cmd):
"""
- Uses the prefered cloud-init subprocess def of util.subp
+ Uses the preferred cloud-init subprocess def of subp.subp
and runs arping. Breaking this to a separate function
for later use in mocking and unittests
"""
- return util.subp(['arping'] + cmd)
+ return subp.subp(['arping'] + cmd)
def gratuitous_arp(items, distro):
@@ -61,7 +62,7 @@ def gratuitous_arp(items, distro):
source_param, item['source'],
item['destination']
])
- except util.ProcessExecutionError as error:
+ except subp.ProcessExecutionError as error:
# warning, because the system is able to function properly
# despite no success - some ARP table may be waiting for
# expiration, but the system may continue
@@ -71,17 +72,16 @@ def gratuitous_arp(items, distro):
def get_md():
rbx_data = None
- devices = [
- dev
- for dev, bdata in util.blkid().items()
- if bdata.get('LABEL', '').upper() == 'CLOUDMD'
- ]
+ devices = set(
+ util.find_devs_with('LABEL=CLOUDMD') +
+ util.find_devs_with('LABEL=cloudmd')
+ )
for device in devices:
try:
rbx_data = util.mount_cb(
device=device,
callback=read_user_data_callback,
- mtype=['vfat', 'fat']
+ mtype=['vfat', 'fat', 'msdosfs']
)
if rbx_data:
break
@@ -189,7 +189,6 @@ def read_user_data_callback(mount_dir):
'passwd': hash,
'lock_passwd': False,
'ssh_authorized_keys': ssh_keys,
- 'shell': '/bin/bash'
}
},
'network_config': network,
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index cf676504..843b3a2a 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -33,6 +33,7 @@ import socket
from cloudinit import log as logging
from cloudinit import serial
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
from cloudinit.event import EventType
@@ -696,9 +697,9 @@ def identify_file(content_f):
cmd = ["file", "--brief", "--mime-type", content_f]
f_type = None
try:
- (f_type, _err) = util.subp(cmd)
+ (f_type, _err) = subp.subp(cmd)
LOG.debug("script %s mime type is %s", content_f, f_type)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
util.logexc(
LOG, ("Failed to identify script type for %s" % content_f, e))
return None if f_type is None else f_type.strip()
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index fc760581..7afa7ed8 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -15,6 +15,7 @@ from cloudinit import temp_utils
from contextlib import contextmanager
from xml.etree import ElementTree
+from cloudinit import subp
from cloudinit import url_helper
from cloudinit import util
from cloudinit import version
@@ -64,13 +65,15 @@ def is_byte_swapped(previous_id, current_id):
return ''.join(dd)
parts = current_id.split('-')
- swapped_id = '-'.join([
+ swapped_id = '-'.join(
+ [
swap_bytestring(parts[0]),
swap_bytestring(parts[1]),
swap_bytestring(parts[2]),
parts[3],
parts[4]
- ])
+ ]
+ )
return previous_id == swapped_id
@@ -90,7 +93,7 @@ def get_boot_telemetry():
raise RuntimeError("Failed to determine kernel start timestamp")
try:
- out, _ = util.subp(['/bin/systemctl',
+ out, _ = subp.subp(['/bin/systemctl',
'show', '-p',
'UserspaceTimestampMonotonic'],
capture=True)
@@ -103,7 +106,7 @@ def get_boot_telemetry():
"UserspaceTimestampMonotonic from systemd")
user_start = kernel_start + (float(tsm) / 1000000)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
raise RuntimeError("Failed to get UserspaceTimestampMonotonic: %s"
% e)
except ValueError as e:
@@ -112,7 +115,7 @@ def get_boot_telemetry():
% e)
try:
- out, _ = util.subp(['/bin/systemctl', 'show',
+ out, _ = subp.subp(['/bin/systemctl', 'show',
'cloud-init-local', '-p',
'InactiveExitTimestampMonotonic'],
capture=True)
@@ -124,7 +127,7 @@ def get_boot_telemetry():
"InactiveExitTimestampMonotonic from systemd")
cloudinit_activation = kernel_start + (float(tsm) / 1000000)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
raise RuntimeError("Failed to get InactiveExitTimestampMonotonic: %s"
% e)
except ValueError as e:
@@ -282,7 +285,7 @@ class OpenSSLManager(object):
LOG.debug('Certificate already generated.')
return
with cd(self.tmpdir):
- util.subp([
+ subp.subp([
'openssl', 'req', '-x509', '-nodes', '-subj',
'/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
'-keyout', self.certificate_names['private_key'],
@@ -299,14 +302,14 @@ class OpenSSLManager(object):
@azure_ds_telemetry_reporter
def _run_x509_action(action, cert):
cmd = ['openssl', 'x509', '-noout', action]
- result, _ = util.subp(cmd, data=cert)
+ result, _ = subp.subp(cmd, data=cert)
return result
@azure_ds_telemetry_reporter
def _get_ssh_key_from_cert(self, certificate):
pub_key = self._run_x509_action('-pubkey', certificate)
keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin']
- ssh_key, _ = util.subp(keygen_cmd, data=pub_key)
+ ssh_key, _ = subp.subp(keygen_cmd, data=pub_key)
return ssh_key
@azure_ds_telemetry_reporter
@@ -339,7 +342,7 @@ class OpenSSLManager(object):
certificates_content.encode('utf-8'),
]
with cd(self.tmpdir):
- out, _ = util.subp(
+ out, _ = subp.subp(
'openssl cms -decrypt -in /dev/stdin -inkey'
' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
' -password pass:'.format(**self.certificate_names),
@@ -623,10 +626,16 @@ def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
shim.clean_up()
+def dhcp_log_cb(out, err):
+ report_diagnostic_event("dhclient output stream: %s" % out)
+ report_diagnostic_event("dhclient error stream: %s" % err)
+
+
class EphemeralDHCPv4WithReporting(object):
def __init__(self, reporter, nic=None):
self.reporter = reporter
- self.ephemeralDHCPv4 = EphemeralDHCPv4(iface=nic)
+ self.ephemeralDHCPv4 = EphemeralDHCPv4(
+ iface=nic, dhcp_log_func=dhcp_log_cb)
def __enter__(self):
with events.ReportEventStack(
diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py
index 0e7cccac..b545c4d6 100644
--- a/cloudinit/sources/helpers/digitalocean.py
+++ b/cloudinit/sources/helpers/digitalocean.py
@@ -8,6 +8,7 @@ import random
from cloudinit import log as logging
from cloudinit import net as cloudnet
from cloudinit import url_helper
+from cloudinit import subp
from cloudinit import util
NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
@@ -15,7 +16,7 @@ NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
LOG = logging.getLogger(__name__)
-def assign_ipv4_link_local(nic=None):
+def assign_ipv4_link_local(distro, nic=None):
"""Bring up NIC using an address using link-local (ip4LL) IPs. On
DigitalOcean, the link-local domain is per-droplet routed, so there
is no risk of collisions. However, to be more safe, the ip4LL
@@ -23,7 +24,7 @@ def assign_ipv4_link_local(nic=None):
"""
if not nic:
- nic = get_link_local_nic()
+ nic = get_link_local_nic(distro)
LOG.debug("selected interface '%s' for reading metadata", nic)
if not nic:
@@ -36,14 +37,14 @@ def assign_ipv4_link_local(nic=None):
ip_addr_cmd = ['ip', 'addr', 'add', addr, 'dev', nic]
ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up']
- if not util.which('ip'):
+ if not subp.which('ip'):
raise RuntimeError("No 'ip' command available to configure ip4LL "
"address")
try:
- util.subp(ip_addr_cmd)
+ subp.subp(ip_addr_cmd)
LOG.debug("assigned ip4LL address '%s' to '%s'", addr, nic)
- util.subp(ip_link_cmd)
+ subp.subp(ip_link_cmd)
LOG.debug("brought device '%s' up", nic)
except Exception:
util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed."
@@ -53,8 +54,12 @@ def assign_ipv4_link_local(nic=None):
return nic
-def get_link_local_nic():
- nics = [f for f in cloudnet.get_devicelist() if cloudnet.is_physical(f)]
+def get_link_local_nic(distro):
+ nics = [
+ f
+ for f in cloudnet.get_devicelist()
+ if distro.networking.is_physical(f)
+ ]
if not nics:
return None
return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, 'ifindex'))
@@ -74,7 +79,7 @@ def del_ipv4_link_local(nic=None):
ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic]
try:
- util.subp(ip_addr_cmd)
+ subp.subp(ip_addr_cmd)
LOG.debug("removed ip4LL addresses from %s", nic)
except Exception as e:
diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py
index 2554530d..72edb023 100644
--- a/cloudinit/sources/helpers/hetzner.py
+++ b/cloudinit/sources/helpers/hetzner.py
@@ -7,6 +7,9 @@ from cloudinit import log as logging
from cloudinit import url_helper
from cloudinit import util
+import base64
+import binascii
+
LOG = logging.getLogger(__name__)
@@ -24,3 +27,19 @@ def read_userdata(url, timeout=2, sec_between=2, retries=30):
if not response.ok():
raise RuntimeError("unable to read userdata at %s" % url)
return response.contents
+
+
+def maybe_b64decode(data: bytes) -> bytes:
+ """base64 decode data
+
+ If data is base64 encoded bytes, return b64decode(data).
+ If not, return data unmodified.
+
+ @param data: data as bytes. TypeError is raised if not bytes.
+ """
+ if not isinstance(data, bytes):
+ raise TypeError("data is '%s', expected bytes" % type(data))
+ try:
+ return base64.b64decode(data, validate=True)
+ except binascii.Error:
+ return data
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index a4373f24..1050efb0 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -16,6 +16,7 @@ from cloudinit import ec2_utils
from cloudinit import log as logging
from cloudinit import net
from cloudinit import sources
+from cloudinit import subp
from cloudinit import url_helper
from cloudinit import util
from cloudinit.sources import BrokenMetadata
@@ -110,7 +111,7 @@ class SourceMixin(object):
dev_entries = util.find_devs_with(criteria)
if dev_entries:
device = dev_entries[0]
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
return device
@@ -397,7 +398,10 @@ class ConfigDriveReader(BaseReader):
except IOError:
raise BrokenMetadata("Failed to read: %s" % path)
try:
- md[key] = translator(contents)
+ # Disable not-callable pylint check; pylint isn't able to
+ # determine that every member of FILES_V1 has a callable in
+ # the appropriate position
+ md[key] = translator(contents) # pylint: disable=E1102
except Exception as e:
raise BrokenMetadata("Failed to process "
"path %s: %s" % (path, e))
diff --git a/cloudinit/sources/helpers/tests/test_netlink.py b/cloudinit/sources/helpers/tests/test_netlink.py
index 58c3adc6..10760bd6 100644
--- a/cloudinit/sources/helpers/tests/test_netlink.py
+++ b/cloudinit/sources/helpers/tests/test_netlink.py
@@ -180,17 +180,22 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
other_ifname = "eth1"
expected_ifname = "eth0"
data_op_down_eth1 = self._media_switch_data(
- other_ifname, RTM_NEWLINK, OPER_DOWN)
+ other_ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_op_up_eth1 = self._media_switch_data(
- other_ifname, RTM_NEWLINK, OPER_UP)
+ other_ifname, RTM_NEWLINK, OPER_UP
+ )
data_op_down_eth0 = self._media_switch_data(
- expected_ifname, RTM_NEWLINK, OPER_DOWN)
+ expected_ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_op_up_eth0 = self._media_switch_data(
- expected_ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_op_down_eth1,
- data_op_up_eth1,
- data_op_down_eth0,
- data_op_up_eth0]
+ expected_ifname, RTM_NEWLINK, OPER_UP)
+ m_read_netlink_socket.side_effect = [
+ data_op_down_eth1,
+ data_op_up_eth1,
+ data_op_down_eth0,
+ data_op_up_eth0
+ ]
wait_for_media_disconnect_connect(m_socket, expected_ifname)
self.assertIn('Ignored netlink event on interface %s' % other_ifname,
self.logs.getvalue())
@@ -207,17 +212,23 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
'''
ifname = "eth0"
data_getlink_down = self._media_switch_data(
- ifname, RTM_GETLINK, OPER_DOWN)
+ ifname, RTM_GETLINK, OPER_DOWN
+ )
data_getlink_up = self._media_switch_data(
- ifname, RTM_GETLINK, OPER_UP)
+ ifname, RTM_GETLINK, OPER_UP
+ )
data_newlink_down = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DOWN)
+ ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_getlink_down,
- data_getlink_up,
- data_newlink_down,
- data_newlink_up]
+ ifname, RTM_NEWLINK, OPER_UP
+ )
+ m_read_netlink_socket.side_effect = [
+ data_getlink_down,
+ data_getlink_up,
+ data_newlink_down,
+ data_newlink_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -233,19 +244,25 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
'''
ifname = "eth0"
data_setlink_down = self._media_switch_data(
- ifname, RTM_SETLINK, OPER_DOWN)
+ ifname, RTM_SETLINK, OPER_DOWN
+ )
data_setlink_up = self._media_switch_data(
- ifname, RTM_SETLINK, OPER_UP)
+ ifname, RTM_SETLINK, OPER_UP
+ )
data_newlink_down = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DOWN)
+ ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_setlink_down,
- data_setlink_up,
- data_newlink_down,
- data_newlink_up,
- data_newlink_down,
- data_newlink_up]
+ ifname, RTM_NEWLINK, OPER_UP
+ )
+ m_read_netlink_socket.side_effect = [
+ data_setlink_down,
+ data_setlink_up,
+ data_newlink_down,
+ data_newlink_up,
+ data_newlink_down,
+ data_newlink_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -255,23 +272,30 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_DORMANT)
- data_op_notpresent = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_NOTPRESENT)
- data_op_lowerdown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_LOWERLAYERDOWN)
- data_op_testing = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_TESTING)
- data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_UNKNOWN)
- m_read_netlink_socket.side_effect = [data_op_up, data_op_up,
- data_op_dormant, data_op_up,
- data_op_notpresent, data_op_up,
- data_op_lowerdown, data_op_up,
- data_op_testing, data_op_up,
- data_op_unknown, data_op_up,
- data_op_down, data_op_up]
+ data_op_dormant = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DORMANT
+ )
+ data_op_notpresent = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_NOTPRESENT
+ )
+ data_op_lowerdown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_LOWERLAYERDOWN
+ )
+ data_op_testing = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_TESTING
+ )
+ data_op_unknown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_UNKNOWN
+ )
+ m_read_netlink_socket.side_effect = [
+ data_op_up, data_op_up,
+ data_op_dormant, data_op_up,
+ data_op_notpresent, data_op_up,
+ data_op_lowerdown, data_op_up,
+ data_op_testing, data_op_up,
+ data_op_unknown, data_op_up,
+ data_op_down, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 14)
@@ -281,12 +305,14 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_DORMANT)
- data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_UNKNOWN)
- m_read_netlink_socket.side_effect = [data_op_down, data_op_dormant,
- data_op_unknown, data_op_up]
+ data_op_dormant = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DORMANT)
+ data_op_unknown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_UNKNOWN)
+ m_read_netlink_socket.side_effect = [
+ data_op_down, data_op_dormant,
+ data_op_unknown, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -300,9 +326,11 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7)
- m_read_netlink_socket.side_effect = [data_op_invalid, data_op_up,
- data_op_down, data_op_invalid,
- data_op_up]
+ m_read_netlink_socket.side_effect = [
+ data_op_invalid, data_op_up,
+ data_op_down, data_op_invalid,
+ data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 5)
@@ -333,8 +361,9 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None)
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_invalid1, data_invalid2,
- data_op_down, data_op_up]
+ m_read_netlink_socket.side_effect = [
+ data_invalid1, data_invalid2, data_op_down, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -344,11 +373,15 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
bytes = ifname.encode("utf-8")
data = bytearray(96)
struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
+ bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8,
- 3, bytes, 5, 16, int_to_bytes(OPER_UP))
+ struct.pack_into(
+ "HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8,
+ 3, bytes, 5, 16, int_to_bytes(OPER_UP)
+ )
m_read_netlink_socket.return_value = data
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 1)
@@ -360,14 +393,18 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data1 = bytearray(112)
data2 = bytearray(32)
struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3,
+ bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data1, 80, 8, 3, bytes, 5, 16,
- int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data1, 80, 8, 3, bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data1, 96, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data2, 16, 8, 3, bytes, 5, 16,
- int_to_bytes(OPER_UP))
+ struct.pack_into(
+ "HH4sHHc", data2, 16, 8, 3, bytes, 5, 16, int_to_bytes(OPER_UP)
+ )
m_read_netlink_socket.side_effect = [data1, data2]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 2)
diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
index 9f14770e..2ab22de9 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
@@ -9,6 +9,7 @@ import logging
import os
import stat
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -61,7 +62,7 @@ class PreCustomScript(RunCustomScript):
"""Executing custom script with precustomization argument."""
LOG.debug("Executing pre-customization script")
self.prepare_script()
- util.subp([CustomScriptConstant.CUSTOM_SCRIPT, "precustomization"])
+ subp.subp([CustomScriptConstant.CUSTOM_SCRIPT, "precustomization"])
class PostCustomScript(RunCustomScript):
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 77cbf3b6..3745a262 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -10,6 +10,7 @@ import os
import re
from cloudinit.net.network_state import mask_to_net_prefix
+from cloudinit import subp
from cloudinit import util
logger = logging.getLogger(__name__)
@@ -73,7 +74,7 @@ class NicConfigurator(object):
The mac address(es) are in the lower case
"""
cmd = ['ip', 'addr', 'show']
- output, _err = util.subp(cmd)
+ output, _err = subp.subp(cmd)
sections = re.split(r'\n\d+: ', '\n' + output)[1:]
macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
@@ -248,8 +249,8 @@ class NicConfigurator(object):
logger.info('Clearing DHCP leases')
# Ignore the return code 1.
- util.subp(["pkill", "dhclient"], rcs=[0, 1])
- util.subp(["rm", "-f", "/var/lib/dhcp/*"])
+ subp.subp(["pkill", "dhclient"], rcs=[0, 1])
+ subp.subp(["rm", "-f", "/var/lib/dhcp/*"])
def configure(self, osfamily=None):
"""
diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
index 8c91fa41..d16a7690 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_passwd.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
@@ -9,6 +9,7 @@
import logging
import os
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -56,10 +57,10 @@ class PasswordConfigurator(object):
LOG.info('Expiring password.')
for user in uidUserList:
try:
- util.subp(['passwd', '--expire', user])
- except util.ProcessExecutionError as e:
+ subp.subp(['passwd', '--expire', user])
+ except subp.ProcessExecutionError as e:
if os.path.exists('/usr/bin/chage'):
- util.subp(['chage', '-d', '0', user])
+ subp.subp(['chage', '-d', '0', user])
else:
LOG.warning('Failed to expire password for %s with error: '
'%s', user, e)
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index c60a38d7..d919f693 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -10,7 +10,7 @@ import os
import re
import time
-from cloudinit import util
+from cloudinit import subp
from .guestcust_event import GuestCustEventEnum
from .guestcust_state import GuestCustStateEnum
@@ -34,7 +34,7 @@ def send_rpc(rpc):
try:
logger.debug("Sending RPC command: %s", rpc)
- (out, err) = util.subp(["vmware-rpctool", rpc], rcs=[0])
+ (out, err) = subp.subp(["vmware-rpctool", rpc], rcs=[0])
# Remove the trailing newline in the output.
if out:
out = out.rstrip()
@@ -128,28 +128,35 @@ def get_tools_config(section, key, defaultVal):
not installed.
"""
- if not util.which('vmware-toolbox-cmd'):
+ if not subp.which('vmware-toolbox-cmd'):
logger.debug(
'vmware-toolbox-cmd not installed, returning default value')
return defaultVal
- retValue = defaultVal
cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key]
try:
- (outText, _) = util.subp(cmd)
- m = re.match(r'([^=]+)=(.*)', outText)
- if m:
- retValue = m.group(2).strip()
- logger.debug("Get tools config: [%s] %s = %s",
- section, key, retValue)
- else:
+ (outText, _) = subp.subp(cmd)
+ except subp.ProcessExecutionError as e:
+ if e.exit_code == 69:
logger.debug(
- "Tools config: [%s] %s is not found, return default value: %s",
- section, key, retValue)
- except util.ProcessExecutionError as e:
- logger.error("Failed running %s[%s]", cmd, e.exit_code)
- logger.exception(e)
+ "vmware-toolbox-cmd returned 69 (unavailable) for cmd: %s."
+ " Return default value: %s", " ".join(cmd), defaultVal)
+ else:
+ logger.error("Failed running %s[%s]", cmd, e.exit_code)
+ logger.exception(e)
+ return defaultVal
+
+ retValue = defaultVal
+ m = re.match(r'([^=]+)=(.*)', outText)
+ if m:
+ retValue = m.group(2).strip()
+ logger.debug("Get tools config: [%s] %s = %s",
+ section, key, retValue)
+ else:
+ logger.debug(
+ "Tools config: [%s] %s is not found, return default value: %s",
+ section, key, retValue)
return retValue
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index 5b6f1b3f..1420a988 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -436,10 +436,11 @@ class TestDataSource(CiTestCase):
expected = {
'base64_encoded_keys': [],
'merged_cfg': {
- '_doc': (
- 'Merged cloud-init system config from '
- '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/'),
- 'datasource': {'_undef': {'key1': False}}},
+ '_doc': (
+ 'Merged cloud-init system config from '
+ '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/'
+ ),
+ 'datasource': {'_undef': {'key1': False}}},
'sensitive_keys': [
'ds/meta_data/some/security-credentials', 'merged_cfg'],
'sys_info': sys_info,
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 918c4aec..72e4e65e 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -346,7 +346,7 @@ def update_ssh_config(updates, fname=DEF_SSHD_CFG):
util.write_file(
fname, "\n".join(
[str(line) for line in lines]
- ) + "\n", copy_mode=True)
+ ) + "\n", preserve_mode=True)
return len(changed) != 0
diff --git a/cloudinit/subp.py b/cloudinit/subp.py
index 0ad09306..804ef3ca 100644
--- a/cloudinit/subp.py
+++ b/cloudinit/subp.py
@@ -1,9 +1,11 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Common utility functions for interacting with subprocess."""
-# TODO move subp shellify and runparts related functions out of util.py
-
import logging
+import os
+import subprocess
+
+from errno import ENOEXEC
LOG = logging.getLogger(__name__)
@@ -54,4 +56,331 @@ def prepend_base_command(base_command, commands):
return fixed_commands
+class ProcessExecutionError(IOError):
+
+ MESSAGE_TMPL = ('%(description)s\n'
+ 'Command: %(cmd)s\n'
+ 'Exit code: %(exit_code)s\n'
+ 'Reason: %(reason)s\n'
+ 'Stdout: %(stdout)s\n'
+ 'Stderr: %(stderr)s')
+ empty_attr = '-'
+
+ def __init__(self, stdout=None, stderr=None,
+ exit_code=None, cmd=None,
+ description=None, reason=None,
+ errno=None):
+ if not cmd:
+ self.cmd = self.empty_attr
+ else:
+ self.cmd = cmd
+
+ if not description:
+ if not exit_code and errno == ENOEXEC:
+ self.description = 'Exec format error. Missing #! in script?'
+ else:
+ self.description = 'Unexpected error while running command.'
+ else:
+ self.description = description
+
+ if not isinstance(exit_code, int):
+ self.exit_code = self.empty_attr
+ else:
+ self.exit_code = exit_code
+
+ if not stderr:
+ if stderr is None:
+ self.stderr = self.empty_attr
+ else:
+ self.stderr = stderr
+ else:
+ self.stderr = self._indent_text(stderr)
+
+ if not stdout:
+ if stdout is None:
+ self.stdout = self.empty_attr
+ else:
+ self.stdout = stdout
+ else:
+ self.stdout = self._indent_text(stdout)
+
+ if reason:
+ self.reason = reason
+ else:
+ self.reason = self.empty_attr
+
+ self.errno = errno
+ message = self.MESSAGE_TMPL % {
+ 'description': self._ensure_string(self.description),
+ 'cmd': self._ensure_string(self.cmd),
+ 'exit_code': self._ensure_string(self.exit_code),
+ 'stdout': self._ensure_string(self.stdout),
+ 'stderr': self._ensure_string(self.stderr),
+ 'reason': self._ensure_string(self.reason),
+ }
+ IOError.__init__(self, message)
+
+ def _ensure_string(self, text):
+ """
+ if data is bytes object, decode
+ """
+ return text.decode() if isinstance(text, bytes) else text
+
+ def _indent_text(self, text, indent_level=8):
+ """
+ indent text on all but the first line, allowing for easy to read output
+ """
+ cr = '\n'
+ indent = ' ' * indent_level
+ # if input is bytes, return bytes
+ if isinstance(text, bytes):
+ cr = cr.encode()
+ indent = indent.encode()
+ # remove any newlines at end of text first to prevent unneeded blank
+ # line in output
+ return text.rstrip(cr).replace(cr, cr + indent)
+
+
+def subp(args, data=None, rcs=None, env=None, capture=True,
+ combine_capture=False, shell=False,
+ logstring=False, decode="replace", target=None, update_env=None,
+ status_cb=None):
+ """Run a subprocess.
+
+ :param args: command to run in a list. [cmd, arg1, arg2...]
+ :param data: input to the command, made available on its stdin.
+ :param rcs:
+ a list of allowed return codes. If subprocess exits with a value not
+ in this list, a ProcessExecutionError will be raised. By default,
+ data is returned as a string. See 'decode' parameter.
+ :param env: a dictionary for the command's environment.
+ :param capture:
+ boolean indicating if output should be captured. If True, then stderr
+ and stdout will be returned. If False, they will not be redirected.
+ :param combine_capture:
+ boolean indicating if stderr should be redirected to stdout. When True,
+ interleaved stderr and stdout will be returned as the first element of
+ a tuple, the second will be empty string or bytes (per decode).
+ if combine_capture is True, then output is captured independent of
+ the value of capture.
+ :param shell: boolean indicating if this should be run with a shell.
+ :param logstring:
+ the command will be logged to DEBUG. If it contains info that should
+ not be logged, then logstring will be logged instead.
+ :param decode:
+ if False, no decoding will be done and returned stdout and stderr will
+ be bytes. Other allowed values are 'strict', 'ignore', and 'replace'.
+ These values are passed through to bytes().decode() as the 'errors'
+ parameter. There is no support for decoding to other than utf-8.
+ :param target:
+ not supported, kwarg present only to make function signature similar
+ to curtin's subp.
+ :param update_env:
+ update the environment for this command with this dictionary.
+ this will not affect the current process's os.environ.
+ :param status_cb:
+ call this function with a single string argument before starting
+ and after finishing.
+
+ :return
+ if not capturing, return is (None, None)
+ if capturing, stdout and stderr are returned.
+ if decode:
+ entries in tuple will be python2 unicode or python3 string
+ if not decode:
+ entries in tuple will be python2 string or python3 bytes
+ """
+
+ # not supported in cloud-init (yet), for now kept in the call signature
+ # to ease maintaining code shared between cloud-init and curtin
+ if target is not None:
+ raise ValueError("target arg not supported by cloud-init")
+
+ if rcs is None:
+ rcs = [0]
+
+ devnull_fp = None
+
+ if update_env:
+ if env is None:
+ env = os.environ
+ env = env.copy()
+ env.update(update_env)
+
+ if target_path(target) != "/":
+ args = ['chroot', target] + list(args)
+
+ if status_cb:
+ command = ' '.join(args) if isinstance(args, list) else args
+ status_cb('Begin run command: {command}\n'.format(command=command))
+ if not logstring:
+ LOG.debug(("Running command %s with allowed return codes %s"
+ " (shell=%s, capture=%s)"),
+ args, rcs, shell, 'combine' if combine_capture else capture)
+ else:
+ LOG.debug(("Running hidden command to protect sensitive "
+ "input/output logstring: %s"), logstring)
+
+ stdin = None
+ stdout = None
+ stderr = None
+ if capture:
+ stdout = subprocess.PIPE
+ stderr = subprocess.PIPE
+ if combine_capture:
+ stdout = subprocess.PIPE
+ stderr = subprocess.STDOUT
+ if data is None:
+ # using devnull assures any reads get null, rather
+ # than possibly waiting on input.
+ devnull_fp = open(os.devnull)
+ stdin = devnull_fp
+ else:
+ stdin = subprocess.PIPE
+ if not isinstance(data, bytes):
+ data = data.encode()
+
+ # Popen converts entries in the arguments array from non-bytes to bytes.
+ # When locale is unset it may use ascii for that encoding which can
+ # cause UnicodeDecodeErrors. (LP: #1751051)
+ if isinstance(args, bytes):
+ bytes_args = args
+ elif isinstance(args, str):
+ bytes_args = args.encode("utf-8")
+ else:
+ bytes_args = [
+ x if isinstance(x, bytes) else x.encode("utf-8")
+ for x in args]
+ try:
+ sp = subprocess.Popen(bytes_args, stdout=stdout,
+ stderr=stderr, stdin=stdin,
+ env=env, shell=shell)
+ (out, err) = sp.communicate(data)
+ except OSError as e:
+ if status_cb:
+ status_cb('ERROR: End run command: invalid command provided\n')
+ raise ProcessExecutionError(
+ cmd=args, reason=e, errno=e.errno,
+ stdout="-" if decode else b"-",
+ stderr="-" if decode else b"-")
+ finally:
+ if devnull_fp:
+ devnull_fp.close()
+
+ # Just ensure blank instead of none.
+ if capture or combine_capture:
+ if not out:
+ out = b''
+ if not err:
+ err = b''
+ if decode:
+ def ldecode(data, m='utf-8'):
+ if not isinstance(data, bytes):
+ return data
+ return data.decode(m, decode)
+
+ out = ldecode(out)
+ err = ldecode(err)
+
+ rc = sp.returncode
+ if rc not in rcs:
+ if status_cb:
+ status_cb(
+ 'ERROR: End run command: exit({code})\n'.format(code=rc))
+ raise ProcessExecutionError(stdout=out, stderr=err,
+ exit_code=rc,
+ cmd=args)
+ if status_cb:
+ status_cb('End run command: exit({code})\n'.format(code=rc))
+ return (out, err)
+
+
+def target_path(target, path=None):
+ # return 'path' inside target, accepting target as None
+ if target in (None, ""):
+ target = "/"
+ elif not isinstance(target, str):
+ raise ValueError("Unexpected input for target: %s" % target)
+ else:
+ target = os.path.abspath(target)
+ # abspath("//") returns "//" specifically for 2 slashes.
+ if target.startswith("//"):
+ target = target[1:]
+
+ if not path:
+ return target
+
+ # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /.
+ while len(path) and path[0] == "/":
+ path = path[1:]
+
+ return os.path.join(target, path)
+
+
+def which(program, search=None, target=None):
+ target = target_path(target)
+
+ if os.path.sep in program:
+ # if program had a '/' in it, then do not search PATH
+ # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls
+ # so effectively we set cwd to / (or target)
+ if is_exe(target_path(target, program)):
+ return program
+
+ if search is None:
+ paths = [p.strip('"') for p in
+ os.environ.get("PATH", "").split(os.pathsep)]
+ if target == "/":
+ search = paths
+ else:
+ search = [p for p in paths if p.startswith("/")]
+
+ # normalize path input
+ search = [os.path.abspath(p) for p in search]
+
+ for path in search:
+ ppath = os.path.sep.join((path, program))
+ if is_exe(target_path(target, ppath)):
+ return ppath
+
+ return None
+
+
+def is_exe(fpath):
+ # return boolean indicating if fpath exists and is executable.
+ return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
+
+
+def runparts(dirp, skip_no_exist=True, exe_prefix=None):
+ if skip_no_exist and not os.path.isdir(dirp):
+ return
+
+ failed = []
+ attempted = []
+
+ if exe_prefix is None:
+ prefix = []
+ elif isinstance(exe_prefix, str):
+ prefix = [str(exe_prefix)]
+ elif isinstance(exe_prefix, list):
+ prefix = exe_prefix
+ else:
+ raise TypeError("exe_prefix must be None, str, or list")
+
+ for exe_name in sorted(os.listdir(dirp)):
+ exe_path = os.path.join(dirp, exe_name)
+ if is_exe(exe_path):
+ attempted.append(exe_path)
+ try:
+ subp(prefix + [exe_path], capture=False)
+ except ProcessExecutionError as e:
+ LOG.debug(e)
+ failed.append(exe_name)
+
+ if failed and attempted:
+ raise RuntimeError(
+ 'Runparts: %s failures (%s) in %s attempted commands' %
+ (len(failed), ",".join(failed), len(attempted)))
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index f3ab7e8c..58f63b69 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -23,9 +23,10 @@ from cloudinit import distros
from cloudinit import helpers as ch
from cloudinit.sources import DataSourceNone
from cloudinit.templater import JINJA_AVAILABLE
+from cloudinit import subp
from cloudinit import util
-_real_subp = util.subp
+_real_subp = subp.subp
# Used for skipping tests
SkipTest = unittest.SkipTest
@@ -134,9 +135,9 @@ class CiTestCase(TestCase):
self.old_handlers = self.logger.handlers
self.logger.handlers = [handler]
if self.allowed_subp is True:
- util.subp = _real_subp
+ subp.subp = _real_subp
else:
- util.subp = self._fake_subp
+ subp.subp = self._fake_subp
def _fake_subp(self, *args, **kwargs):
if 'args' in kwargs:
@@ -171,7 +172,7 @@ class CiTestCase(TestCase):
# Remove the handler we setup
logging.getLogger().handlers = self.old_handlers
logging.getLogger().level = None
- util.subp = _real_subp
+ subp.subp = _real_subp
super(CiTestCase, self).tearDown()
def tmp_dir(self, dir=None, cleanup=True):
@@ -280,13 +281,13 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
mock.patch.object(mod, f, trap_func))
# Handle subprocess calls
- func = getattr(util, 'subp')
+ func = getattr(subp, 'subp')
def nsubp(*_args, **_kwargs):
return ('', '')
self.patched_funcs.enter_context(
- mock.patch.object(util, 'subp', nsubp))
+ mock.patch.object(subp, 'subp', nsubp))
def null_func(*_args, **_kwargs):
return None
diff --git a/cloudinit/tests/test_conftest.py b/cloudinit/tests/test_conftest.py
index 773ef8fe..6f1263a5 100644
--- a/cloudinit/tests/test_conftest.py
+++ b/cloudinit/tests/test_conftest.py
@@ -1,6 +1,6 @@
import pytest
-from cloudinit import util
+from cloudinit import subp
from cloudinit.tests.helpers import CiTestCase
@@ -9,36 +9,40 @@ class TestDisableSubpUsage:
def test_using_subp_raises_assertion_error(self):
with pytest.raises(AssertionError):
- util.subp(["some", "args"])
+ subp.subp(["some", "args"])
def test_typeerrors_on_incorrect_usage(self):
with pytest.raises(TypeError):
# We are intentionally passing no value for a parameter, so:
# pylint: disable=no-value-for-parameter
- util.subp()
+ subp.subp()
- @pytest.mark.parametrize('disable_subp_usage', [False], indirect=True)
+ @pytest.mark.allow_all_subp
def test_subp_usage_can_be_reenabled(self):
- util.subp(['whoami'])
+ subp.subp(['whoami'])
- @pytest.mark.parametrize(
- 'disable_subp_usage', [['whoami'], 'whoami'], indirect=True)
+ @pytest.mark.allow_subp_for("whoami")
def test_subp_usage_can_be_conditionally_reenabled(self):
# The two parameters test each potential invocation with a single
# argument
with pytest.raises(AssertionError) as excinfo:
- util.subp(["some", "args"])
+ subp.subp(["some", "args"])
assert "allowed: whoami" in str(excinfo.value)
- util.subp(['whoami'])
+ subp.subp(['whoami'])
- @pytest.mark.parametrize(
- 'disable_subp_usage', [['whoami', 'bash']], indirect=True)
+ @pytest.mark.allow_subp_for("whoami", "bash")
def test_subp_usage_can_be_conditionally_reenabled_for_multiple_cmds(self):
with pytest.raises(AssertionError) as excinfo:
- util.subp(["some", "args"])
+ subp.subp(["some", "args"])
assert "allowed: whoami,bash" in str(excinfo.value)
- util.subp(['bash', '-c', 'true'])
- util.subp(['whoami'])
+ subp.subp(['bash', '-c', 'true'])
+ subp.subp(['whoami'])
+
+ @pytest.mark.allow_all_subp
+ @pytest.mark.allow_subp_for("bash")
+ def test_both_marks_raise_an_error(self):
+ with pytest.raises(AssertionError, match="marked both"):
+ subp.subp(["bash"])
class TestDisableSubpUsageInTestSubclass(CiTestCase):
@@ -46,16 +50,16 @@ class TestDisableSubpUsageInTestSubclass(CiTestCase):
def test_using_subp_raises_exception(self):
with pytest.raises(Exception):
- util.subp(["some", "args"])
+ subp.subp(["some", "args"])
def test_typeerrors_on_incorrect_usage(self):
with pytest.raises(TypeError):
- util.subp()
+ subp.subp()
def test_subp_usage_can_be_reenabled(self):
_old_allowed_subp = self.allow_subp
self.allowed_subp = True
try:
- util.subp(['bash', '-c', 'true'])
+ subp.subp(['bash', '-c', 'true'])
finally:
self.allowed_subp = _old_allowed_subp
diff --git a/cloudinit/tests/test_features.py b/cloudinit/tests/test_features.py
new file mode 100644
index 00000000..d7a7226d
--- /dev/null
+++ b/cloudinit/tests/test_features.py
@@ -0,0 +1,60 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+# pylint: disable=no-member,no-name-in-module
+"""
+This file is for testing the feature flag functionality itself,
+NOT for testing any individual feature flag
+"""
+import pytest
+import sys
+from pathlib import Path
+
+import cloudinit
+
+
+@pytest.yield_fixture()
+def create_override(request):
+ """
+ Create a feature overrides file and do some module wizardry to make
+ it seem like we're importing the features file for the first time.
+
+ After creating the override file with the values passed by the test,
+ we need to reload cloudinit.features
+ to get all of the current features (including the overridden ones).
+ Once the test is complete, we remove the file we created and set
+ features and feature_overrides modules to how they were before
+ the test started
+ """
+ override_path = Path(cloudinit.__file__).parent / 'feature_overrides.py'
+ if override_path.exists():
+ raise Exception("feature_overrides.py unexpectedly exists! "
+ "Remove it to run this test.")
+ with override_path.open('w') as f:
+ for key, value in request.param.items():
+ f.write('{} = {}\n'.format(key, value))
+
+ sys.modules.pop('cloudinit.features', None)
+
+ yield
+
+ override_path.unlink()
+ sys.modules.pop('cloudinit.feature_overrides', None)
+
+
+class TestFeatures:
+ def test_feature_without_override(self):
+ from cloudinit.features import ERROR_ON_USER_DATA_FAILURE
+ assert ERROR_ON_USER_DATA_FAILURE is True
+
+ @pytest.mark.parametrize('create_override',
+ [{'ERROR_ON_USER_DATA_FAILURE': False}],
+ indirect=True)
+ def test_feature_with_override(self, create_override):
+ from cloudinit.features import ERROR_ON_USER_DATA_FAILURE
+ assert ERROR_ON_USER_DATA_FAILURE is False
+
+ @pytest.mark.parametrize('create_override',
+ [{'SPAM': True}],
+ indirect=True)
+ def test_feature_only_in_override(self, create_override):
+ from cloudinit.features import SPAM
+ assert SPAM is True
diff --git a/cloudinit/tests/test_gpg.py b/cloudinit/tests/test_gpg.py
index 8dd57137..f96f5372 100644
--- a/cloudinit/tests/test_gpg.py
+++ b/cloudinit/tests/test_gpg.py
@@ -4,19 +4,19 @@
from unittest import mock
from cloudinit import gpg
-from cloudinit import util
+from cloudinit import subp
from cloudinit.tests.helpers import CiTestCase
@mock.patch("cloudinit.gpg.time.sleep")
-@mock.patch("cloudinit.gpg.util.subp")
+@mock.patch("cloudinit.gpg.subp.subp")
class TestReceiveKeys(CiTestCase):
"""Test the recv_key method."""
def test_retries_on_subp_exc(self, m_subp, m_sleep):
"""retry should be done on gpg receive keys failure."""
retries = (1, 2, 4)
- my_exc = util.ProcessExecutionError(
+ my_exc = subp.ProcessExecutionError(
stdout='', stderr='', exit_code=2, cmd=['mycmd'])
m_subp.side_effect = (my_exc, my_exc, ('', ''))
gpg.recv_key("ABCD", "keyserver.example.com", retries=retries)
@@ -26,7 +26,7 @@ class TestReceiveKeys(CiTestCase):
"""If the final run fails, error should be raised."""
naplen = 1
keyid, keyserver = ("ABCD", "keyserver.example.com")
- m_subp.side_effect = util.ProcessExecutionError(
+ m_subp.side_effect = subp.ProcessExecutionError(
stdout='', stderr='', exit_code=2, cmd=['mycmd'])
with self.assertRaises(ValueError) as rcm:
gpg.recv_key(keyid, keyserver, retries=(naplen,))
@@ -36,7 +36,7 @@ class TestReceiveKeys(CiTestCase):
def test_no_retries_on_none(self, m_subp, m_sleep):
"""retry should not be done if retries is None."""
- m_subp.side_effect = util.ProcessExecutionError(
+ m_subp.side_effect = subp.ProcessExecutionError(
stdout='', stderr='', exit_code=2, cmd=['mycmd'])
with self.assertRaises(ValueError):
gpg.recv_key("ABCD", "keyserver.example.com", retries=None)
diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py
index 1c8a791e..e44b16d8 100644
--- a/cloudinit/tests/test_netinfo.py
+++ b/cloudinit/tests/test_netinfo.py
@@ -27,8 +27,8 @@ class TestNetInfo(CiTestCase):
maxDiff = None
with_logs = True
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_old_nettools_pformat(self, m_subp, m_which):
"""netdev_pformat properly rendering old nettools info."""
m_subp.return_value = (SAMPLE_OLD_IFCONFIG_OUT, '')
@@ -36,8 +36,8 @@ class TestNetInfo(CiTestCase):
content = netdev_pformat()
self.assertEqual(NETDEV_FORMATTED_OUT, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_new_nettools_pformat(self, m_subp, m_which):
"""netdev_pformat properly rendering netdev new nettools info."""
m_subp.return_value = (SAMPLE_NEW_IFCONFIG_OUT, '')
@@ -45,8 +45,8 @@ class TestNetInfo(CiTestCase):
content = netdev_pformat()
self.assertEqual(NETDEV_FORMATTED_OUT, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_freebsd_nettools_pformat(self, m_subp, m_which):
"""netdev_pformat properly rendering netdev new nettools info."""
m_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, '')
@@ -57,8 +57,8 @@ class TestNetInfo(CiTestCase):
print()
self.assertEqual(FREEBSD_NETDEV_OUT, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_iproute_pformat(self, m_subp, m_which):
"""netdev_pformat properly rendering ip route info."""
m_subp.return_value = (SAMPLE_IPADDRSHOW_OUT, '')
@@ -72,8 +72,8 @@ class TestNetInfo(CiTestCase):
'255.0.0.0 | . |', '255.0.0.0 | host |')
self.assertEqual(new_output, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_warn_on_missing_commands(self, m_subp, m_which):
"""netdev_pformat warns when missing both ip and 'netstat'."""
m_which.return_value = None # Niether ip nor netstat found
@@ -85,8 +85,8 @@ class TestNetInfo(CiTestCase):
self.logs.getvalue())
m_subp.assert_not_called()
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_info_nettools_down(self, m_subp, m_which):
"""test netdev_info using nettools and down interfaces."""
m_subp.return_value = (
@@ -100,8 +100,8 @@ class TestNetInfo(CiTestCase):
'hwaddr': '.', 'up': True}},
netdev_info("."))
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_info_iproute_down(self, m_subp, m_which):
"""Test netdev_info with ip and down interfaces."""
m_subp.return_value = (
@@ -130,8 +130,8 @@ class TestNetInfo(CiTestCase):
readResource("netinfo/netdev-formatted-output-down"),
netdev_pformat())
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_route_nettools_pformat(self, m_subp, m_which):
"""route_pformat properly rendering nettools route info."""
@@ -147,8 +147,8 @@ class TestNetInfo(CiTestCase):
content = route_pformat()
self.assertEqual(ROUTE_FORMATTED_OUT, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_route_iproute_pformat(self, m_subp, m_which):
"""route_pformat properly rendering ip route info."""
@@ -165,8 +165,8 @@ class TestNetInfo(CiTestCase):
content = route_pformat()
self.assertEqual(ROUTE_FORMATTED_OUT, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_route_warn_on_missing_commands(self, m_subp, m_which):
"""route_pformat warns when missing both ip and 'netstat'."""
m_which.return_value = None # Niether ip nor netstat found
diff --git a/cloudinit/tests/test_subp.py b/cloudinit/tests/test_subp.py
index 448097d3..911c1f3d 100644
--- a/cloudinit/tests/test_subp.py
+++ b/cloudinit/tests/test_subp.py
@@ -2,10 +2,21 @@
"""Tests for cloudinit.subp utility functions"""
-from cloudinit import subp
+import json
+import os
+import sys
+import stat
+
+from unittest import mock
+
+from cloudinit import subp, util
from cloudinit.tests.helpers import CiTestCase
+BASH = subp.which('bash')
+BOGUS_COMMAND = 'this-is-not-expected-to-be-a-program-name'
+
+
class TestPrependBaseCommands(CiTestCase):
with_logs = True
@@ -58,4 +69,218 @@ class TestPrependBaseCommands(CiTestCase):
self.assertEqual('', self.logs.getvalue())
self.assertEqual(expected, fixed_commands)
+
+class TestSubp(CiTestCase):
+ allowed_subp = [BASH, 'cat', CiTestCase.SUBP_SHELL_TRUE,
+ BOGUS_COMMAND, sys.executable]
+
+ stdin2err = [BASH, '-c', 'cat >&2']
+ stdin2out = ['cat']
+ utf8_invalid = b'ab\xaadef'
+ utf8_valid = b'start \xc3\xa9 end'
+ utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7'
+ printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--']
+
+ def printf_cmd(self, *args):
+ # bash's printf supports \xaa. So does /usr/bin/printf
+ # but by using bash, we remove dependency on another program.
+ return([BASH, '-c', 'printf "$@"', 'printf'] + list(args))
+
+ def test_subp_handles_bytestrings(self):
+ """subp can run a bytestring command if shell is True."""
+ tmp_file = self.tmp_path('test.out')
+ cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file)
+ (out, _err) = subp.subp(cmd.encode('utf-8'), shell=True)
+ self.assertEqual(u'', out)
+ self.assertEqual(u'', _err)
+ self.assertEqual('HI MOM\n', util.load_file(tmp_file))
+
+ def test_subp_handles_strings(self):
+ """subp can run a string command if shell is True."""
+ tmp_file = self.tmp_path('test.out')
+ cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file)
+ (out, _err) = subp.subp(cmd, shell=True)
+ self.assertEqual(u'', out)
+ self.assertEqual(u'', _err)
+ self.assertEqual('HI MOM\n', util.load_file(tmp_file))
+
+ def test_subp_handles_utf8(self):
+ # The given bytes contain utf-8 accented characters as seen in e.g.
+ # the "deja dup" package in Ubuntu.
+ cmd = self.printf_cmd(self.utf8_valid_2)
+ (out, _err) = subp.subp(cmd, capture=True)
+ self.assertEqual(out, self.utf8_valid_2.decode('utf-8'))
+
+ def test_subp_respects_decode_false(self):
+ (out, err) = subp.subp(self.stdin2out, capture=True, decode=False,
+ data=self.utf8_valid)
+ self.assertTrue(isinstance(out, bytes))
+ self.assertTrue(isinstance(err, bytes))
+ self.assertEqual(out, self.utf8_valid)
+
+ def test_subp_decode_ignore(self):
+ # this executes a string that writes invalid utf-8 to stdout
+ (out, _err) = subp.subp(self.printf_cmd('abc\\xaadef'),
+ capture=True, decode='ignore')
+ self.assertEqual(out, 'abcdef')
+
+ def test_subp_decode_strict_valid_utf8(self):
+ (out, _err) = subp.subp(self.stdin2out, capture=True,
+ decode='strict', data=self.utf8_valid)
+ self.assertEqual(out, self.utf8_valid.decode('utf-8'))
+
+ def test_subp_decode_invalid_utf8_replaces(self):
+ (out, _err) = subp.subp(self.stdin2out, capture=True,
+ data=self.utf8_invalid)
+ expected = self.utf8_invalid.decode('utf-8', 'replace')
+ self.assertEqual(out, expected)
+
+ def test_subp_decode_strict_raises(self):
+ args = []
+ kwargs = {'args': self.stdin2out, 'capture': True,
+ 'decode': 'strict', 'data': self.utf8_invalid}
+ self.assertRaises(UnicodeDecodeError, subp.subp, *args, **kwargs)
+
+ def test_subp_capture_stderr(self):
+ data = b'hello world'
+ (out, err) = subp.subp(self.stdin2err, capture=True,
+ decode=False, data=data,
+ update_env={'LC_ALL': 'C'})
+ self.assertEqual(err, data)
+ self.assertEqual(out, b'')
+
+ def test_subp_reads_env(self):
+ with mock.patch.dict("os.environ", values={'FOO': 'BAR'}):
+ out, _err = subp.subp(self.printenv + ['FOO'], capture=True)
+ self.assertEqual('FOO=BAR', out.splitlines()[0])
+
+ def test_subp_env_and_update_env(self):
+ out, _err = subp.subp(
+ self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True,
+ env={'FOO': 'BAR'},
+ update_env={'HOME': '/myhome', 'K2': 'V2'})
+ self.assertEqual(
+ ['FOO=BAR', 'HOME=/myhome', 'K1=', 'K2=V2'], out.splitlines())
+
+ def test_subp_update_env(self):
+ extra = {'FOO': 'BAR', 'HOME': '/root', 'K1': 'V1'}
+ with mock.patch.dict("os.environ", values=extra):
+ out, _err = subp.subp(
+ self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True,
+ update_env={'HOME': '/myhome', 'K2': 'V2'})
+
+ self.assertEqual(
+ ['FOO=BAR', 'HOME=/myhome', 'K1=V1', 'K2=V2'], out.splitlines())
+
+ def test_subp_warn_missing_shebang(self):
+        """Raise ProcessExecutionError with a hint when the script has no #!"""
+ noshebang = self.tmp_path('noshebang')
+ util.write_file(noshebang, 'true\n')
+
+ print("os is %s" % os)
+ os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC)
+ with self.allow_subp([noshebang]):
+ self.assertRaisesRegex(subp.ProcessExecutionError,
+ r'Missing #! in script\?',
+ subp.subp, (noshebang,))
+
+ def test_subp_combined_stderr_stdout(self):
+ """Providing combine_capture as True redirects stderr to stdout."""
+ data = b'hello world'
+ (out, err) = subp.subp(self.stdin2err, capture=True,
+ combine_capture=True, decode=False, data=data)
+ self.assertEqual(b'', err)
+ self.assertEqual(data, out)
+
+ def test_returns_none_if_no_capture(self):
+ (out, err) = subp.subp(self.stdin2out, data=b'', capture=False)
+ self.assertIsNone(err)
+ self.assertIsNone(out)
+
+ def test_exception_has_out_err_are_bytes_if_decode_false(self):
+ """Raised exc should have stderr, stdout as bytes if no decode."""
+ with self.assertRaises(subp.ProcessExecutionError) as cm:
+ subp.subp([BOGUS_COMMAND], decode=False)
+ self.assertTrue(isinstance(cm.exception.stdout, bytes))
+ self.assertTrue(isinstance(cm.exception.stderr, bytes))
+
+ def test_exception_has_out_err_are_bytes_if_decode_true(self):
+        """Raised exc should have stderr, stdout as string if decode is True."""
+ with self.assertRaises(subp.ProcessExecutionError) as cm:
+ subp.subp([BOGUS_COMMAND], decode=True)
+ self.assertTrue(isinstance(cm.exception.stdout, str))
+ self.assertTrue(isinstance(cm.exception.stderr, str))
+
+ def test_bunch_of_slashes_in_path(self):
+ self.assertEqual("/target/my/path/",
+ subp.target_path("/target/", "//my/path/"))
+ self.assertEqual("/target/my/path/",
+ subp.target_path("/target/", "///my/path/"))
+
+ def test_c_lang_can_take_utf8_args(self):
+ """Independent of system LC_CTYPE, args can contain utf-8 strings.
+
+ When python starts up, its default encoding gets set based on
+ the value of LC_CTYPE. If no system locale is set, the default
+ encoding for both python2 and python3 in some paths will end up
+ being ascii.
+
+ Attempts to use setlocale or patching (or changing) os.environ
+ in the current environment seem to not be effective.
+
+ This test starts up a python with LC_CTYPE set to C so that
+ the default encoding will be set to ascii. In such an environment
+ Popen(['command', 'non-ascii-arg']) would cause a UnicodeDecodeError.
+ """
+ python_prog = '\n'.join([
+ 'import json, sys',
+ 'from cloudinit.subp import subp',
+ 'data = sys.stdin.read()',
+ 'cmd = json.loads(data)',
+ 'subp(cmd, capture=False)',
+ ''])
+ cmd = [BASH, '-c', 'echo -n "$@"', '--',
+ self.utf8_valid.decode("utf-8")]
+ python_subp = [sys.executable, '-c', python_prog]
+
+ out, _err = subp.subp(
+ python_subp, update_env={'LC_CTYPE': 'C'},
+ data=json.dumps(cmd).encode("utf-8"),
+ decode=False)
+ self.assertEqual(self.utf8_valid, out)
+
+ def test_bogus_command_logs_status_messages(self):
+        """status_cb receives status messages when a bogus command is provided."""
+ logs = []
+
+ def status_cb(log):
+ logs.append(log)
+
+ with self.assertRaises(subp.ProcessExecutionError):
+ subp.subp([BOGUS_COMMAND], status_cb=status_cb)
+
+ expected = [
+ 'Begin run command: {cmd}\n'.format(cmd=BOGUS_COMMAND),
+ 'ERROR: End run command: invalid command provided\n']
+ self.assertEqual(expected, logs)
+
+ def test_command_logs_exit_codes_to_status_cb(self):
+ """status_cb gets status messages containing command exit code."""
+ logs = []
+
+ def status_cb(log):
+ logs.append(log)
+
+ with self.assertRaises(subp.ProcessExecutionError):
+ subp.subp([BASH, '-c', 'exit 2'], status_cb=status_cb)
+ subp.subp([BASH, '-c', 'exit 0'], status_cb=status_cb)
+
+ expected = [
+ 'Begin run command: %s -c exit 2\n' % BASH,
+ 'ERROR: End run command: exit(2)\n',
+ 'Begin run command: %s -c exit 0\n' % BASH,
+ 'End run command: exit(0)\n']
+ self.assertEqual(expected, logs)
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index bfccfe1e..096a3037 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -9,6 +9,7 @@ import platform
import pytest
import cloudinit.util as util
+from cloudinit import subp
from cloudinit.tests.helpers import CiTestCase, mock
from textwrap import dedent
@@ -332,7 +333,7 @@ class TestBlkid(CiTestCase):
"PARTUUID": self.ids["id09"]},
})
- @mock.patch("cloudinit.util.subp")
+ @mock.patch("cloudinit.subp.subp")
def test_functional_blkid(self, m_subp):
m_subp.return_value = (
self.blkid_out.format(**self.ids), "")
@@ -340,7 +341,7 @@ class TestBlkid(CiTestCase):
m_subp.assert_called_with(["blkid", "-o", "full"], capture=True,
decode="replace")
- @mock.patch("cloudinit.util.subp")
+ @mock.patch("cloudinit.subp.subp")
def test_blkid_no_cache_uses_no_cache(self, m_subp):
"""blkid should turn off cache if disable_cache is true."""
m_subp.return_value = (
@@ -351,7 +352,7 @@ class TestBlkid(CiTestCase):
capture=True, decode="replace")
-@mock.patch('cloudinit.util.subp')
+@mock.patch('cloudinit.subp.subp')
class TestUdevadmSettle(CiTestCase):
def test_with_no_params(self, m_subp):
"""called with no parameters."""
@@ -396,8 +397,8 @@ class TestUdevadmSettle(CiTestCase):
'--timeout=%s' % timeout])
def test_subp_exception_raises_to_caller(self, m_subp):
- m_subp.side_effect = util.ProcessExecutionError("BOOM")
- self.assertRaises(util.ProcessExecutionError, util.udevadm_settle)
+ m_subp.side_effect = subp.ProcessExecutionError("BOOM")
+ self.assertRaises(subp.ProcessExecutionError, util.udevadm_settle)
@mock.patch('os.path.exists')
@@ -703,4 +704,71 @@ class TestReadCcFromCmdline:
assert expected_cfg == util.read_conf_from_cmdline(cmdline=cmdline)
+class TestMountCb:
+ """Tests for ``util.mount_cb``.
+
+ These tests consider the "unit" under test to be ``util.mount_cb`` and
+ ``util.unmounter``, which is only used by ``mount_cb``.
+
+ TODO: Test default mtype determination
+ TODO: Test the if/else branch that actually performs the mounting operation
+ """
+
+ @pytest.yield_fixture
+ def already_mounted_device_and_mountdict(self):
+ """Mock an already-mounted device, and yield (device, mount dict)"""
+ device = "/dev/fake0"
+ mountpoint = "/mnt/fake"
+ with mock.patch("cloudinit.util.subp.subp"):
+ with mock.patch("cloudinit.util.mounts") as m_mounts:
+ mounts = {device: {"mountpoint": mountpoint}}
+ m_mounts.return_value = mounts
+ yield device, mounts[device]
+
+ @pytest.fixture
+ def already_mounted_device(self, already_mounted_device_and_mountdict):
+ """already_mounted_device_and_mountdict, but return only the device"""
+ return already_mounted_device_and_mountdict[0]
+
+ @pytest.mark.parametrize("invalid_mtype", [int(0), float(0.0), dict()])
+ def test_typeerror_raised_for_invalid_mtype(self, invalid_mtype):
+ with pytest.raises(TypeError):
+ util.mount_cb(mock.Mock(), mock.Mock(), mtype=invalid_mtype)
+
+ @mock.patch("cloudinit.util.subp.subp")
+ def test_already_mounted_does_not_mount_or_umount_anything(
+ self, m_subp, already_mounted_device
+ ):
+ util.mount_cb(already_mounted_device, mock.Mock())
+
+ assert 0 == m_subp.call_count
+
+ @pytest.mark.parametrize("trailing_slash_in_mounts", ["/", ""])
+ def test_already_mounted_calls_callback(
+ self, trailing_slash_in_mounts, already_mounted_device_and_mountdict
+ ):
+ device, mount_dict = already_mounted_device_and_mountdict
+ mountpoint = mount_dict["mountpoint"]
+ mount_dict["mountpoint"] += trailing_slash_in_mounts
+
+ callback = mock.Mock()
+ util.mount_cb(device, callback)
+
+ # The mountpoint passed to callback should always have a trailing
+ # slash, regardless of the input
+ assert [mock.call(mountpoint + "/")] == callback.call_args_list
+
+ def test_already_mounted_calls_callback_with_data(
+ self, already_mounted_device
+ ):
+ callback = mock.Mock()
+ util.mount_cb(
+ already_mounted_device, callback, data=mock.sentinel.data
+ )
+
+ assert [
+ mock.call(mock.ANY, mock.sentinel.data)
+ ] == callback.call_args_list
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 670dbee6..67bdf981 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -16,6 +16,7 @@ from email.mime.text import MIMEText
from cloudinit import handlers
from cloudinit import log as logging
+from cloudinit import features
from cloudinit.url_helper import read_file_or_url, UrlError
from cloudinit import util
@@ -69,6 +70,13 @@ def _set_filename(msg, filename):
'attachment', filename=str(filename))
+def _handle_error(error_message, source_exception=None):
+ if features.ERROR_ON_USER_DATA_FAILURE:
+ raise Exception(error_message) from source_exception
+ else:
+ LOG.warning(error_message)
+
+
class UserDataProcessor(object):
def __init__(self, paths):
self.paths = paths
@@ -108,9 +116,11 @@ class UserDataProcessor(object):
ctype_orig = None
was_compressed = True
except util.DecompressionError as e:
- LOG.warning("Failed decompressing payload from %s of"
- " length %s due to: %s",
- ctype_orig, len(payload), e)
+ error_message = (
+ "Failed decompressing payload from {} of"
+ " length {} due to: {}".format(
+ ctype_orig, len(payload), e))
+ _handle_error(error_message, e)
continue
# Attempt to figure out the payloads content-type
@@ -231,19 +241,22 @@ class UserDataProcessor(object):
if resp.ok():
content = resp.contents
else:
- LOG.warning(("Fetching from %s resulted in"
- " a invalid http code of %s"),
- include_url, resp.code)
+ error_message = (
+ "Fetching from {} resulted in"
+ " a invalid http code of {}".format(
+ include_url, resp.code))
+ _handle_error(error_message)
except UrlError as urle:
message = str(urle)
# Older versions of requests.exceptions.HTTPError may not
# include the errant url. Append it for clarity in logs.
if include_url not in message:
message += ' for url: {0}'.format(include_url)
- LOG.warning(message)
+ _handle_error(message, urle)
except IOError as ioe:
- LOG.warning("Fetching from %s resulted in %s",
- include_url, ioe)
+ error_message = "Fetching from {} resulted in {}".format(
+ include_url, ioe)
+ _handle_error(error_message, ioe)
if content is not None:
new_msg = convert_string(content)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 985e7d20..b6f1117f 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -32,12 +32,13 @@ import subprocess
import sys
import time
from base64 import b64decode, b64encode
-from errno import ENOENT, ENOEXEC
+from errno import ENOENT
from functools import lru_cache
from urllib import parse
from cloudinit import importer
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import (
mergers,
safeyaml,
@@ -74,8 +75,8 @@ def get_dpkg_architecture(target=None):
N.B. This function is wrapped in functools.lru_cache, so repeated calls
won't shell out every time.
"""
- out, _ = subp(['dpkg', '--print-architecture'], capture=True,
- target=target)
+ out, _ = subp.subp(['dpkg', '--print-architecture'], capture=True,
+ target=target)
return out.strip()
@@ -86,7 +87,8 @@ def lsb_release(target=None):
data = {}
try:
- out, _ = subp(['lsb_release', '--all'], capture=True, target=target)
+ out, _ = subp.subp(['lsb_release', '--all'], capture=True,
+ target=target)
for line in out.splitlines():
fname, _, val = line.partition(":")
if fname in fmap:
@@ -96,35 +98,13 @@ def lsb_release(target=None):
LOG.warning("Missing fields in lsb_release --all output: %s",
','.join(missing))
- except ProcessExecutionError as err:
+ except subp.ProcessExecutionError as err:
LOG.warning("Unable to get lsb_release --all: %s", err)
data = dict((v, "UNAVAILABLE") for v in fmap.values())
return data
-def target_path(target, path=None):
- # return 'path' inside target, accepting target as None
- if target in (None, ""):
- target = "/"
- elif not isinstance(target, str):
- raise ValueError("Unexpected input for target: %s" % target)
- else:
- target = os.path.abspath(target)
- # abspath("//") returns "//" specifically for 2 slashes.
- if target.startswith("//"):
- target = target[1:]
-
- if not path:
- return target
-
- # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /.
- while len(path) and path[0] == "/":
- path = path[1:]
-
- return os.path.join(target, path)
-
-
def decode_binary(blob, encoding='utf-8'):
# Converts a binary type into a text type using given encoding.
if isinstance(blob, str):
@@ -201,91 +181,6 @@ DMIDECODE_TO_DMI_SYS_MAPPING = {
}
-class ProcessExecutionError(IOError):
-
- MESSAGE_TMPL = ('%(description)s\n'
- 'Command: %(cmd)s\n'
- 'Exit code: %(exit_code)s\n'
- 'Reason: %(reason)s\n'
- 'Stdout: %(stdout)s\n'
- 'Stderr: %(stderr)s')
- empty_attr = '-'
-
- def __init__(self, stdout=None, stderr=None,
- exit_code=None, cmd=None,
- description=None, reason=None,
- errno=None):
- if not cmd:
- self.cmd = self.empty_attr
- else:
- self.cmd = cmd
-
- if not description:
- if not exit_code and errno == ENOEXEC:
- self.description = 'Exec format error. Missing #! in script?'
- else:
- self.description = 'Unexpected error while running command.'
- else:
- self.description = description
-
- if not isinstance(exit_code, int):
- self.exit_code = self.empty_attr
- else:
- self.exit_code = exit_code
-
- if not stderr:
- if stderr is None:
- self.stderr = self.empty_attr
- else:
- self.stderr = stderr
- else:
- self.stderr = self._indent_text(stderr)
-
- if not stdout:
- if stdout is None:
- self.stdout = self.empty_attr
- else:
- self.stdout = stdout
- else:
- self.stdout = self._indent_text(stdout)
-
- if reason:
- self.reason = reason
- else:
- self.reason = self.empty_attr
-
- self.errno = errno
- message = self.MESSAGE_TMPL % {
- 'description': self._ensure_string(self.description),
- 'cmd': self._ensure_string(self.cmd),
- 'exit_code': self._ensure_string(self.exit_code),
- 'stdout': self._ensure_string(self.stdout),
- 'stderr': self._ensure_string(self.stderr),
- 'reason': self._ensure_string(self.reason),
- }
- IOError.__init__(self, message)
-
- def _ensure_string(self, text):
- """
- if data is bytes object, decode
- """
- return text.decode() if isinstance(text, bytes) else text
-
- def _indent_text(self, text, indent_level=8):
- """
- indent text on all but the first line, allowing for easy to read output
- """
- cr = '\n'
- indent = ' ' * indent_level
- # if input is bytes, return bytes
- if isinstance(text, bytes):
- cr = cr.encode()
- indent = indent.encode()
- # remove any newlines at end of text first to prevent unneeded blank
- # line in output
- return text.rstrip(cr).replace(cr, cr + indent)
-
-
class SeLinuxGuard(object):
def __init__(self, path, recursive=False):
# Late import since it might not always
@@ -854,37 +749,6 @@ def del_dir(path):
shutil.rmtree(path)
-def runparts(dirp, skip_no_exist=True, exe_prefix=None):
- if skip_no_exist and not os.path.isdir(dirp):
- return
-
- failed = []
- attempted = []
-
- if exe_prefix is None:
- prefix = []
- elif isinstance(exe_prefix, str):
- prefix = [str(exe_prefix)]
- elif isinstance(exe_prefix, list):
- prefix = exe_prefix
- else:
- raise TypeError("exe_prefix must be None, str, or list")
-
- for exe_name in sorted(os.listdir(dirp)):
- exe_path = os.path.join(dirp, exe_name)
- if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
- attempted.append(exe_path)
- try:
- subp(prefix + [exe_path], capture=False)
- except ProcessExecutionError as e:
- logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code)
- failed.append(e)
-
- if failed and attempted:
- raise RuntimeError('Runparts: %s failures in %s attempted commands'
- % (len(failed), len(attempted)))
-
-
# read_optional_seed
# returns boolean indicating success or failure (presense of files)
# if files are present, populates 'fill' dictionary with 'user-data' and
@@ -1271,10 +1135,10 @@ def find_devs_with_netbsd(criteria=None, oformat='device',
label = criteria.lstrip("LABEL=")
if criteria.startswith("TYPE="):
_type = criteria.lstrip("TYPE=")
- out, _err = subp(['sysctl', '-n', 'hw.disknames'], rcs=[0])
+ out, _err = subp.subp(['sysctl', '-n', 'hw.disknames'], rcs=[0])
for dev in out.split():
if label or _type:
- mscdlabel_out, _ = subp(['mscdlabel', dev], rcs=[0, 1])
+ mscdlabel_out, _ = subp.subp(['mscdlabel', dev], rcs=[0, 1])
if label and not ('label "%s"' % label) in mscdlabel_out:
continue
if _type == "iso9660" and "ISO filesystem" not in mscdlabel_out:
@@ -1287,7 +1151,7 @@ def find_devs_with_netbsd(criteria=None, oformat='device',
def find_devs_with_openbsd(criteria=None, oformat='device',
tag=None, no_cache=False, path=None):
- out, _err = subp(['sysctl', '-n', 'hw.disknames'], rcs=[0])
+ out, _err = subp.subp(['sysctl', '-n', 'hw.disknames'], rcs=[0])
devlist = []
for entry in out.split(','):
if not entry.endswith(':'):
@@ -1353,8 +1217,8 @@ def find_devs_with(criteria=None, oformat='device',
cmd = blk_id_cmd + options
# See man blkid for why 2 is added
try:
- (out, _err) = subp(cmd, rcs=[0, 2])
- except ProcessExecutionError as e:
+ (out, _err) = subp.subp(cmd, rcs=[0, 2])
+ except subp.ProcessExecutionError as e:
if e.errno == ENOENT:
# blkid not found...
out = ""
@@ -1389,7 +1253,7 @@ def blkid(devs=None, disable_cache=False):
# we have to decode with 'replace' as shelx.split (called by
# load_shell_content) can't take bytes. So this is potentially
# lossy of non-utf-8 chars in blkid output.
- out, _ = subp(cmd, capture=True, decode="replace")
+ out, _ = subp.subp(cmd, capture=True, decode="replace")
ret = {}
for line in out.splitlines():
dev, _, data = line.partition(":")
@@ -1709,7 +1573,7 @@ def unmounter(umount):
finally:
if umount:
umount_cmd = ["umount", umount]
- subp(umount_cmd)
+ subp.subp(umount_cmd)
def mounts():
@@ -1720,7 +1584,7 @@ def mounts():
mount_locs = load_file("/proc/mounts").splitlines()
method = 'proc'
else:
- (mountoutput, _err) = subp("mount")
+ (mountoutput, _err) = subp.subp("mount")
mount_locs = mountoutput.splitlines()
method = 'mount'
mountre = r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$'
@@ -1804,7 +1668,7 @@ def mount_cb(device, callback, data=None, mtype=None,
mountcmd.extend(['-t', mtype])
mountcmd.append(device)
mountcmd.append(tmpd)
- subp(mountcmd, update_env=update_env_for_mount)
+ subp.subp(mountcmd, update_env=update_env_for_mount)
umount = tmpd # This forces it to be unmounted (when set)
mountpoint = tmpd
break
@@ -1936,7 +1800,15 @@ def chmod(path, mode):
os.chmod(path, real_mode)
-def write_file(filename, content, mode=0o644, omode="wb", copy_mode=False):
+def write_file(
+ filename,
+ content,
+ mode=0o644,
+ omode="wb",
+ preserve_mode=False,
+ *,
+ ensure_dir_exists=True
+):
"""
Writes a file with the given content and sets the file mode as specified.
Restores the SELinux context if possible.
@@ -1945,16 +1817,22 @@ def write_file(filename, content, mode=0o644, omode="wb", copy_mode=False):
@param content: The content to write to the file.
@param mode: The filesystem mode to set on the file.
@param omode: The open mode used when opening the file (w, wb, a, etc.)
+ @param preserve_mode: If True and `filename` exists, preserve `filename`s
+ current mode instead of applying `mode`.
+ @param ensure_dir_exists: If True (the default), ensure that the directory
+ containing `filename` exists before writing to
+ the file.
"""
- if copy_mode:
+ if preserve_mode:
try:
file_stat = os.stat(filename)
mode = stat.S_IMODE(file_stat.st_mode)
except OSError:
pass
- ensure_dir(os.path.dirname(filename))
+ if ensure_dir_exists:
+ ensure_dir(os.path.dirname(filename))
if 'b' in omode.lower():
content = encode_text(content)
write_type = 'bytes'
@@ -1988,185 +1866,6 @@ def delete_dir_contents(dirname):
del_file(node_fullpath)
-def subp_blob_in_tempfile(blob, *args, **kwargs):
- """Write blob to a tempfile, and call subp with args, kwargs. Then cleanup.
-
- 'basename' as a kwarg allows providing the basename for the file.
- The 'args' argument to subp will be updated with the full path to the
- filename as the first argument.
- """
- basename = kwargs.pop('basename', "subp_blob")
-
- if len(args) == 0 and 'args' not in kwargs:
- args = [tuple()]
-
- # Use tmpdir over tmpfile to avoid 'text file busy' on execute
- with temp_utils.tempdir(needs_exe=True) as tmpd:
- tmpf = os.path.join(tmpd, basename)
- if 'args' in kwargs:
- kwargs['args'] = [tmpf] + list(kwargs['args'])
- else:
- args = list(args)
- args[0] = [tmpf] + args[0]
-
- write_file(tmpf, blob, mode=0o700)
- return subp(*args, **kwargs)
-
-
-def subp(args, data=None, rcs=None, env=None, capture=True,
- combine_capture=False, shell=False,
- logstring=False, decode="replace", target=None, update_env=None,
- status_cb=None):
- """Run a subprocess.
-
- :param args: command to run in a list. [cmd, arg1, arg2...]
- :param data: input to the command, made available on its stdin.
- :param rcs:
- a list of allowed return codes. If subprocess exits with a value not
- in this list, a ProcessExecutionError will be raised. By default,
- data is returned as a string. See 'decode' parameter.
- :param env: a dictionary for the command's environment.
- :param capture:
- boolean indicating if output should be captured. If True, then stderr
- and stdout will be returned. If False, they will not be redirected.
- :param combine_capture:
- boolean indicating if stderr should be redirected to stdout. When True,
- interleaved stderr and stdout will be returned as the first element of
- a tuple, the second will be empty string or bytes (per decode).
- if combine_capture is True, then output is captured independent of
- the value of capture.
- :param shell: boolean indicating if this should be run with a shell.
- :param logstring:
- the command will be logged to DEBUG. If it contains info that should
- not be logged, then logstring will be logged instead.
- :param decode:
- if False, no decoding will be done and returned stdout and stderr will
- be bytes. Other allowed values are 'strict', 'ignore', and 'replace'.
- These values are passed through to bytes().decode() as the 'errors'
- parameter. There is no support for decoding to other than utf-8.
- :param target:
- not supported, kwarg present only to make function signature similar
- to curtin's subp.
- :param update_env:
- update the enviornment for this command with this dictionary.
- this will not affect the current processes os.environ.
- :param status_cb:
- call this fuction with a single string argument before starting
- and after finishing.
-
- :return
- if not capturing, return is (None, None)
- if capturing, stdout and stderr are returned.
- if decode:
- entries in tuple will be python2 unicode or python3 string
- if not decode:
- entries in tuple will be python2 string or python3 bytes
- """
-
- # not supported in cloud-init (yet), for now kept in the call signature
- # to ease maintaining code shared between cloud-init and curtin
- if target is not None:
- raise ValueError("target arg not supported by cloud-init")
-
- if rcs is None:
- rcs = [0]
-
- devnull_fp = None
-
- if update_env:
- if env is None:
- env = os.environ
- env = env.copy()
- env.update(update_env)
-
- if target_path(target) != "/":
- args = ['chroot', target] + list(args)
-
- if status_cb:
- command = ' '.join(args) if isinstance(args, list) else args
- status_cb('Begin run command: {command}\n'.format(command=command))
- if not logstring:
- LOG.debug(("Running command %s with allowed return codes %s"
- " (shell=%s, capture=%s)"),
- args, rcs, shell, 'combine' if combine_capture else capture)
- else:
- LOG.debug(("Running hidden command to protect sensitive "
- "input/output logstring: %s"), logstring)
-
- stdin = None
- stdout = None
- stderr = None
- if capture:
- stdout = subprocess.PIPE
- stderr = subprocess.PIPE
- if combine_capture:
- stdout = subprocess.PIPE
- stderr = subprocess.STDOUT
- if data is None:
- # using devnull assures any reads get null, rather
- # than possibly waiting on input.
- devnull_fp = open(os.devnull)
- stdin = devnull_fp
- else:
- stdin = subprocess.PIPE
- if not isinstance(data, bytes):
- data = data.encode()
-
- # Popen converts entries in the arguments array from non-bytes to bytes.
- # When locale is unset it may use ascii for that encoding which can
- # cause UnicodeDecodeErrors. (LP: #1751051)
- if isinstance(args, bytes):
- bytes_args = args
- elif isinstance(args, str):
- bytes_args = args.encode("utf-8")
- else:
- bytes_args = [
- x if isinstance(x, bytes) else x.encode("utf-8")
- for x in args]
- try:
- sp = subprocess.Popen(bytes_args, stdout=stdout,
- stderr=stderr, stdin=stdin,
- env=env, shell=shell)
- (out, err) = sp.communicate(data)
- except OSError as e:
- if status_cb:
- status_cb('ERROR: End run command: invalid command provided\n')
- raise ProcessExecutionError(
- cmd=args, reason=e, errno=e.errno,
- stdout="-" if decode else b"-",
- stderr="-" if decode else b"-")
- finally:
- if devnull_fp:
- devnull_fp.close()
-
- # Just ensure blank instead of none.
- if capture or combine_capture:
- if not out:
- out = b''
- if not err:
- err = b''
- if decode:
- def ldecode(data, m='utf-8'):
- if not isinstance(data, bytes):
- return data
- return data.decode(m, decode)
-
- out = ldecode(out)
- err = ldecode(err)
-
- rc = sp.returncode
- if rc not in rcs:
- if status_cb:
- status_cb(
- 'ERROR: End run command: exit({code})\n'.format(code=rc))
- raise ProcessExecutionError(stdout=out, stderr=err,
- exit_code=rc,
- cmd=args)
- if status_cb:
- status_cb('End run command: exit({code})\n'.format(code=rc))
- return (out, err)
-
-
def make_header(comment_char="#", base='created'):
ci_ver = version.version_string()
header = str(comment_char)
@@ -2175,8 +1874,8 @@ def make_header(comment_char="#", base='created'):
return header
-def abs_join(*paths):
- return os.path.abspath(os.path.join(*paths))
+def abs_join(base, *paths):
+ return os.path.abspath(os.path.join(base, *paths))
# shellify, takes a list of commands
@@ -2232,7 +1931,7 @@ def is_container():
try:
# try to run a helper program. if it returns true/zero
# then we're inside a container. otherwise, no
- subp(helper)
+ subp.subp(helper)
return True
except (IOError, OSError):
pass
@@ -2438,7 +2137,7 @@ def find_freebsd_part(fs):
return splitted[2]
elif splitted[2] in ['label', 'gpt', 'ufs']:
target_label = fs[5:]
- (part, _err) = subp(['glabel', 'status', '-s'])
+ (part, _err) = subp.subp(['glabel', 'status', '-s'])
for labels in part.split("\n"):
items = labels.split()
if len(items) > 0 and items[0] == target_label:
@@ -2460,10 +2159,10 @@ def get_path_dev_freebsd(path, mnt_list):
def get_mount_info_freebsd(path):
- (result, err) = subp(['mount', '-p', path], rcs=[0, 1])
+ (result, err) = subp.subp(['mount', '-p', path], rcs=[0, 1])
if len(err):
# find a path if the input is not a mounting point
- (mnt_list, err) = subp(['mount', '-p'])
+ (mnt_list, err) = subp.subp(['mount', '-p'])
path_found = get_path_dev_freebsd(path, mnt_list)
if (path_found is None):
return None
@@ -2479,8 +2178,8 @@ def get_device_info_from_zpool(zpool):
LOG.debug('Cannot get zpool info, no /dev/zfs')
return None
try:
- (zpoolstatus, err) = subp(['zpool', 'status', zpool])
- except ProcessExecutionError as err:
+ (zpoolstatus, err) = subp.subp(['zpool', 'status', zpool])
+ except subp.ProcessExecutionError as err:
LOG.warning("Unable to get zpool status of %s: %s", zpool, err)
return None
if len(err):
@@ -2494,7 +2193,7 @@ def get_device_info_from_zpool(zpool):
def parse_mount(path):
- (mountoutput, _err) = subp(['mount'])
+ (mountoutput, _err) = subp.subp(['mount'])
mount_locs = mountoutput.splitlines()
# there are 2 types of mount outputs we have to parse therefore
# the regex is a bit complex. to better understand this regex see:
@@ -2567,40 +2266,6 @@ def get_mount_info(path, log=LOG, get_mnt_opts=False):
return parse_mount(path)
-def is_exe(fpath):
- # return boolean indicating if fpath exists and is executable.
- return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
-
-
-def which(program, search=None, target=None):
- target = target_path(target)
-
- if os.path.sep in program:
- # if program had a '/' in it, then do not search PATH
- # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls
- # so effectively we set cwd to / (or target)
- if is_exe(target_path(target, program)):
- return program
-
- if search is None:
- paths = [p.strip('"') for p in
- os.environ.get("PATH", "").split(os.pathsep)]
- if target == "/":
- search = paths
- else:
- search = [p for p in paths if p.startswith("/")]
-
- # normalize path input
- search = [os.path.abspath(p) for p in search]
-
- for path in search:
- ppath = os.path.sep.join((path, program))
- if is_exe(target_path(target, ppath)):
- return ppath
-
- return None
-
-
def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False):
if args is None:
args = []
@@ -2764,7 +2429,7 @@ def _call_dmidecode(key, dmidecode_path):
"""
try:
cmd = [dmidecode_path, "--string", key]
- (result, _err) = subp(cmd)
+ (result, _err) = subp.subp(cmd)
result = result.strip()
LOG.debug("dmidecode returned '%s' for '%s'", result, key)
if result.replace(".", "") == "":
@@ -2818,7 +2483,7 @@ def read_dmi_data(key):
        LOG.debug("dmidata is not supported on %s", uname_arch)
        return None
-    dmidecode_path = which('dmidecode')
+    dmidecode_path = subp.which('dmidecode')
if dmidecode_path:
return _call_dmidecode(key, dmidecode_path)
@@ -2834,7 +2500,7 @@ def message_from_string(string):
def get_installed_packages(target=None):
- (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True)
+ (out, _) = subp.subp(['dpkg-query', '--list'], target=target, capture=True)
pkgs_inst = set()
for line in out.splitlines():
@@ -2970,7 +2636,7 @@ def udevadm_settle(exists=None, timeout=None):
if timeout:
settle_cmd.extend(['--timeout=%s' % timeout])
- return subp(settle_cmd)
+ return subp.subp(settle_cmd)
def get_proc_ppid(pid):
diff --git a/config/cloud.cfg.d/05_logging.cfg b/config/cloud.cfg.d/05_logging.cfg
index 937b07f8..bf917a95 100644
--- a/config/cloud.cfg.d/05_logging.cfg
+++ b/config/cloud.cfg.d/05_logging.cfg
@@ -44,7 +44,7 @@ _log:
class=FileHandler
level=DEBUG
formatter=arg0Formatter
- args=('/var/log/cloud-init.log',)
+ args=('/var/log/cloud-init.log', 'a', 'UTF-8')
- &log_syslog |
[handler_cloudLogHandler]
class=handlers.SysLogHandler
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index 1bb97f83..b44cbce7 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -70,8 +70,8 @@ cloud_init_modules:
{% endif %}
- set_hostname
- update_hostname
-{% if variant not in ["freebsd", "netbsd"] %}
- update_etc_hosts
+{% if not variant.endswith("bsd") %}
- ca-certs
- rsyslog
{% endif %}
diff --git a/conftest.py b/conftest.py
new file mode 100644
index 00000000..faf13804
--- /dev/null
+++ b/conftest.py
@@ -0,0 +1,95 @@
+from unittest import mock
+
+import pytest
+
+from cloudinit import subp
+
+
+def _closest_marker_args_or(request, marker_name: str, default):
+ """Get the args for the closest ``marker_name`` or return ``default``"""
+ try:
+ marker = request.node.get_closest_marker(marker_name)
+ except AttributeError:
+ # Older versions of pytest don't have the new API
+ marker = request.node.get_marker(marker_name)
+ if marker is not None:
+ return marker.args
+ return default
+
+
+@pytest.yield_fixture(autouse=True)
+def disable_subp_usage(request):
+ """
+ Across all (pytest) tests, ensure that subp.subp is not invoked.
+
+    Note that this can only catch invocations where the subp module is imported
+    and ``subp.subp(...)`` is called. ``from cloudinit.subp import subp``
+ imports happen before the patching here (or the CiTestCase monkey-patching)
+ happens, so are left untouched.
+
+ To allow a particular test method or class to use subp.subp you can mark it
+ as such::
+
+ @pytest.mark.allow_all_subp
+ def test_whoami(self):
+ subp.subp(["whoami"])
+
+ To instead allow subp.subp usage for a specific command, you can use the
+ ``allow_subp_for`` mark::
+
+ @pytest.mark.allow_subp_for("bash")
+ def test_bash(self):
+ subp.subp(["bash"])
+
+ You can pass multiple commands as values; they will all be permitted::
+
+ @pytest.mark.allow_subp_for("bash", "whoami")
+ def test_several_things(self):
+ subp.subp(["bash"])
+ subp.subp(["whoami"])
+
+ This fixture (roughly) mirrors the functionality of
+ CiTestCase.allowed_subp. N.B. While autouse fixtures do affect non-pytest
+ tests, CiTestCase's allowed_subp does take precedence (and we have
+ TestDisableSubpUsageInTestSubclass to confirm that).
+ """
+ allow_subp_for = _closest_marker_args_or(request, "allow_subp_for", None)
+ # Because the mark doesn't take arguments, `allow_all_subp` will be set to
+ # [] if the marker is present, so explicit None checks are required
+ allow_all_subp = _closest_marker_args_or(request, "allow_all_subp", None)
+
+ if allow_all_subp is not None and allow_subp_for is None:
+ # Only allow_all_subp specified, don't mock subp.subp
+ yield
+ return
+
+ if allow_all_subp is None and allow_subp_for is None:
+ # No marks, default behaviour; disallow all subp.subp usage
+ def side_effect(args, *other_args, **kwargs):
+ raise AssertionError("Unexpectedly used subp.subp")
+
+ elif allow_all_subp is not None and allow_subp_for is not None:
+ # Both marks, ambiguous request; raise an exception on all subp usage
+ def side_effect(args, *other_args, **kwargs):
+ raise AssertionError(
+ "Test marked both allow_all_subp and allow_subp_for: resolve"
+ " this either by modifying your test code, or by modifying"
+ " disable_subp_usage to handle precedence."
+ )
+ else:
+ # Look this up before our patch is in place, so we have access to
+ # the real implementation in side_effect
+ real_subp = subp.subp
+
+ def side_effect(args, *other_args, **kwargs):
+ cmd = args[0]
+ if cmd not in allow_subp_for:
+ raise AssertionError(
+ "Unexpectedly used subp.subp to call {} (allowed:"
+ " {})".format(cmd, ",".join(allow_subp_for))
+ )
+ return real_subp(args, *other_args, **kwargs)
+
+ with mock.patch("cloudinit.subp.subp", autospec=True) as m_subp:
+ m_subp.side_effect = side_effect
+ yield
diff --git a/debian/changelog b/debian/changelog
index 3054f97d..be3b5753 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,75 @@
+cloud-init (20.2-94-g3d06abc2-0ubuntu1) groovy; urgency=medium
+
+ * New upstream snapshot.
+ - cc_mounts: handle missing fstab (#484) (LP: #1886531)
+ - LXD cloud_tests: support more lxd image formats (#482) [Paride Legovini]
+ - Add update_etc_hosts as default module on *BSD (#479) [Adam Dobrawy]
+ - cloudinit: fix tip-pylint failures and bump pinned pylint version (#478)
+ - Added BirknerAlex as contributor and sorted the file (#477)
+ [Alexander Birkner]
+ - Update list of types of modules in cli.rst [saurabhvartak1982]
+ - tests: use markers to configure disable_subp_usage (#473)
+ - Add mention of vendor-data to no-cloud format documentation (#470)
+ [Landon Kirk]
+ - Fix broken link to OpenStack metadata service docs (#467)
+ [Matt Riedemann]
+ - Disable ec2 mirror for non aws instances (#390)
+ [lucasmoura] (LP: #1456277)
+ - cloud_tests: don't pass --python-version to read-dependencies (#465)
+ - networking: refactor is_physical from cloudinit.net (#457) (LP: #1884619)
+ - Enable use of the caplog fixture in pytest tests, and add a
+ cc_final_message test using it (#461)
+ - RbxCloud: Add support for FreeBSD (#464) [Adam Dobrawy]
+ - Add schema for cc_chef module (#375) [lucasmoura] (LP: #1858888)
+ - test_util: add (partial) testing for util.mount_cb (#463)
+ - .travis.yml: revert to installing ubuntu-dev-tools (#460)
+ - HACKING.rst: add details of net refactor tracking (#456)
+ - .travis.yml: rationalise installation of dependencies in host (#449)
+ - Add dermotbradley as contributor. (#458) [dermotbradley]
+ - net/networking: remove unused functions/methods (#453)
+ - distros.networking: initial implementation of layout (#391)
+ - cloud-init.service.tmpl: use "rhel" instead of "redhat" (#452)
+ - Change from redhat to rhel in systemd generator tmpl (#450)
+ [Eduardo Otubo]
+ - Hetzner: support reading user-data that is base64 encoded. (#448)
+ [Scott Moser] (LP: #1884071)
+ - HACKING.rst: add strpath gotcha to testing gotchas section (#446)
+ - cc_final_message: don't create directories when writing boot-finished
+ (#445) (LP: #1883903)
+ - .travis.yml: only store new schroot if something has changed (#440)
+ - util: add ensure_dir_exists parameter to write_file (#443)
+ - printing the error stream of the dhclient process before killing it
+ (#369) [Moustafa Moustafa]
+ - Fix link to the MAAS documentation (#442)
+ [Paride Legovini] (LP: #1883666)
+ - RPM build: disable the dynamic mirror URLs when using a proxy (#437)
+ [Paride Legovini]
+ - util: rename write_file's copy_mode parameter to preserve_mode (#439)
+ - .travis.yml: use $TRAVIS_BUILD_DIR for lxd_image caching (#438)
+ - cli.rst: alphabetise devel subcommands and add net-convert to list (#430)
+ - Default to UTF-8 in /var/log/cloud-init.log (#427) [James Falcon]
+ - travis: cache the chroot we use for package builds (#429)
+ - test: fix all flake8 E126 errors (#425) [Joshua Powers]
+ - Fixes KeyError for bridge with no "parameters:" setting (#423)
+ [Brian Candler] (LP: #1879673)
+ - When tools.conf does not exist, running cmd "vmware-toolbox-cmd
+ config get deployPkg enable-custom-scripts", the return code will
+ be EX_UNAVAILABLE(69), on this condition, it should not take it as
+ error. (#413) [chengcheng-chcheng]
+ - Document CloudStack data-server well-known hostname (#399) [Gregor Riepl]
+ - test: move conftest.py to top-level, to cover tests/ also (#414)
+ - Replace cc_chef is_installed with use of subp.is_exe. (#421)
+ [Scott Moser]
+ - Move runparts to subp. (#420) [Scott Moser]
+ - Move subp into its own module. (#416) [Scott Moser]
+ - readme: point at travis-ci.com (#417) [Joshua Powers]
+ - New feature flag functionality and fix includes failing silently (#367)
+ [James Falcon] (LP: #1734939)
+ - Enhance poll imds logging (#365) [Moustafa Moustafa]
+ - test: fix all flake8 E121 and E123 errors (#404) [Joshua Powers]
+
+ -- Daniel Watkins <oddbloke@ubuntu.com> Fri, 10 Jul 2020 09:11:25 -0400
+
cloud-init (20.2-45-g5f7825e2-0ubuntu1) groovy; urgency=medium
* d/control: drop python3-six, python3-unittest2, python3-pep8,
diff --git a/doc/rtd/topics/cli.rst b/doc/rtd/topics/cli.rst
index b32677b0..0ff230b5 100644
--- a/doc/rtd/topics/cli.rst
+++ b/doc/rtd/topics/cli.rst
@@ -106,17 +106,19 @@ Do **NOT** rely on the output of these commands as they can and will change.
Current subcommands:
- * ``schema``: a **#cloud-config** format and schema
- validator. It accepts a cloud-config yaml file and annotates potential
- schema errors locally without the need for deployment. Schema
- validation is work in progress and supports a subset of cloud-config
- modules.
-
+ * ``net-convert``: manually use cloud-init's network format conversion, useful
+ for testing configuration or testing changes to the network conversion logic
+ itself.
* ``render``: use cloud-init's jinja template render to
process **#cloud-config** or **custom-scripts**, injecting any variables
from ``/run/cloud-init/instance-data.json``. It accepts a user-data file
containing the jinja template header ``## template: jinja`` and renders
that content with any instance-data.json variables present.
+ * ``schema``: a **#cloud-config** format and schema
+ validator. It accepts a cloud-config yaml file and annotates potential
+ schema errors locally without the need for deployment. Schema
+ validation is work in progress and supports a subset of cloud-config
+ modules.
.. _cli_features:
@@ -162,7 +164,7 @@ declared to run in various boot stages in the file
* *cloud_init_modules*
* *cloud_config_modules*
-* *cloud_init_modules*
+* *cloud_final_modules*
Can be run on the command line, but each module is gated to run only once due
to semaphores in ``/var/lib/cloud/``.
diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst
index 592328ea..a24de34f 100644
--- a/doc/rtd/topics/datasources/cloudstack.rst
+++ b/doc/rtd/topics/datasources/cloudstack.rst
@@ -9,14 +9,20 @@ dhcp lease information given to the instance.
For more details on meta-data and user-data,
refer the `CloudStack Administrator Guide`_.
-URLs to access user-data and meta-data from the Virtual Machine. Here 10.1.1.1
-is the Virtual Router IP:
+URLs to access user-data and meta-data from the Virtual Machine.
+`data-server.` is a well-known hostname provided by the CloudStack virtual
+router that points to the next UserData server (which is usually also
+the virtual router).
.. code:: bash
- http://10.1.1.1/latest/user-data
- http://10.1.1.1/latest/meta-data
- http://10.1.1.1/latest/meta-data/{metadata type}
+ http://data-server./latest/user-data
+ http://data-server./latest/meta-data
+ http://data-server./latest/meta-data/{metadata type}
+
+If `data-server.` cannot be resolved, cloud-init will try to obtain the
+virtual router's address from the system's DHCP leases. If that fails,
+it will use the system's default gateway.
Configuration
-------------
diff --git a/doc/rtd/topics/datasources/maas.rst b/doc/rtd/topics/datasources/maas.rst
index 85c853e9..427fba24 100644
--- a/doc/rtd/topics/datasources/maas.rst
+++ b/doc/rtd/topics/datasources/maas.rst
@@ -5,6 +5,6 @@ MAAS
*TODO*
-For now see: http://maas.ubuntu.com/
+For now see: https://maas.io/docs
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
index 6d3075f4..0ca79102 100644
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ b/doc/rtd/topics/datasources/nocloud.rst
@@ -53,6 +53,12 @@ These user-data and meta-data files are expected to be in the following format.
Basically, user-data is simply user-data and meta-data is a yaml formatted file
representing what you'd find in the EC2 metadata service.
+You may also optionally provide a vendor-data file in the following format.
+
+::
+
+ /vendor-data
+
Given a disk ubuntu 12.04 cloud image in 'disk.img', you can create a
sufficient disk by following the example below.
diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst
index b8870bf1..b23b4b7c 100644
--- a/doc/rtd/topics/datasources/openstack.rst
+++ b/doc/rtd/topics/datasources/openstack.rst
@@ -5,7 +5,7 @@ OpenStack
This datasource supports reading data from the
`OpenStack Metadata Service
-<https://docs.openstack.org/nova/latest/admin/networking-nova.html#metadata-service>`_.
+<https://docs.openstack.org/nova/latest/admin/metadata-service.html>`_.
Discovery
-------------
diff --git a/packages/bddeb b/packages/bddeb
index 02ac2975..b0f219b6 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -24,6 +24,7 @@ def find_root():
if "avoid-pep8-E402-import-not-top-of-file":
# Use the util functions from cloudinit
sys.path.insert(0, find_root())
+ from cloudinit import subp
from cloudinit import util
from cloudinit import temp_utils
from cloudinit import templater
@@ -53,7 +54,7 @@ def run_helper(helper, args=None, strip=True):
if args is None:
args = []
cmd = [util.abs_join(find_root(), 'tools', helper)] + args
- (stdout, _stderr) = util.subp(cmd)
+ (stdout, _stderr) = subp.subp(cmd)
if strip:
stdout = stdout.strip()
return stdout
@@ -67,7 +68,7 @@ def write_debian_folder(root, templ_data, cloud_util_deps):
# Just copy debian/ dir and then update files
pdeb_d = util.abs_join(find_root(), 'packages', 'debian')
- util.subp(['cp', '-a', pdeb_d, deb_dir])
+ subp.subp(['cp', '-a', pdeb_d, deb_dir])
# Fill in the change log template
templater.render_to_file(util.abs_join(find_root(),
@@ -90,6 +91,8 @@ def write_debian_folder(root, templ_data, cloud_util_deps):
# NOTE: python package was moved to the front after debuild -S would fail with
# 'Please add apropriate interpreter' errors (as in debian bug 861132)
requires.extend(['python3'] + reqs + test_reqs)
+ if templ_data['debian_release'] == 'xenial':
+ requires.append('python3-pytest-catchlog')
templater.render_to_file(util.abs_join(find_root(),
'packages', 'debian', 'control.in'),
util.abs_join(deb_dir, 'control'),
@@ -190,7 +193,7 @@ def main():
print("Extracting temporary tarball %r" % (tarball))
cmd = ['tar', '-xvzf', tarball_fp, '-C', tdir]
- util.subp(cmd, capture=capture)
+ subp.subp(cmd, capture=capture)
xdir = util.abs_join(tdir, "cloud-init-%s" % ver_data['version_long'])
templ_data.update(ver_data)
@@ -203,7 +206,7 @@ def main():
cmd = ['debuild', '--preserve-envvar', 'INIT_SYSTEM']
if args.debuild_args:
cmd.extend(args.debuild_args)
- util.subp(cmd, capture=capture)
+ subp.subp(cmd, capture=capture)
link_fn = os.path.join(os.getcwd(), 'cloud-init_all.deb')
link_dsc = os.path.join(os.getcwd(), 'cloud-init.dsc')
diff --git a/packages/brpm b/packages/brpm
index 1be8804c..56477d2e 100755
--- a/packages/brpm
+++ b/packages/brpm
@@ -24,6 +24,7 @@ def find_root():
if "avoid-pep8-E402-import-not-top-of-file":
# Use the util functions from cloudinit
sys.path.insert(0, find_root())
+ from cloudinit import subp
from cloudinit import templater
from cloudinit import util
@@ -36,7 +37,7 @@ def run_helper(helper, args=None, strip=True):
if args is None:
args = []
cmd = [util.abs_join(find_root(), 'tools', helper)] + args
- (stdout, _stderr) = util.subp(cmd)
+ (stdout, _stderr) = subp.subp(cmd)
if strip:
stdout = stdout.strip()
return stdout
@@ -174,7 +175,7 @@ def main():
else:
cmd = ['rpmbuild', '-ba', spec_fn]
- util.subp(cmd, capture=capture)
+ subp.subp(cmd, capture=capture)
# Copy the items built to our local dir
globs = []
diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl
index 45efa243..0773356b 100755
--- a/systemd/cloud-init-generator.tmpl
+++ b/systemd/cloud-init-generator.tmpl
@@ -83,7 +83,7 @@ default() {
check_for_datasource() {
local ds_rc=""
-{% if variant in ["redhat", "fedora", "centos"] %}
+{% if variant in ["rhel", "fedora", "centos"] %}
local dsidentify="/usr/libexec/cloud-init/ds-identify"
{% else %}
local dsidentify="/usr/lib/cloud-init/ds-identify"
diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl
index 9ad3574c..af6d9a8b 100644
--- a/systemd/cloud-init.service.tmpl
+++ b/systemd/cloud-init.service.tmpl
@@ -10,7 +10,7 @@ After=systemd-networkd-wait-online.service
{% if variant in ["ubuntu", "unknown", "debian"] %}
After=networking.service
{% endif %}
-{% if variant in ["centos", "fedora", "redhat"] %}
+{% if variant in ["centos", "fedora", "rhel"] %}
After=network.service
After=NetworkManager.service
{% endif %}
diff --git a/templates/hosts.freebsd.tmpl b/templates/hosts.freebsd.tmpl
index 7ded762f..5cd5d3bc 100644
--- a/templates/hosts.freebsd.tmpl
+++ b/templates/hosts.freebsd.tmpl
@@ -11,14 +11,13 @@ you need to add the following to config:
# a.) make changes to the master file in /etc/cloud/templates/hosts.freebsd.tmpl
# b.) change or remove the value of 'manage_etc_hosts' in
# /etc/cloud/cloud.cfg or cloud-config from user-data
-#
-# The following lines are desirable for IPv4 capable hosts
-127.0.0.1 {{fqdn}} {{hostname}}
-127.0.0.1 localhost.localdomain localhost
-127.0.0.1 localhost4.localdomain4 localhost4
# The following lines are desirable for IPv6 capable hosts
::1 {{fqdn}} {{hostname}}
::1 localhost.localdomain localhost
::1 localhost6.localdomain6 localhost6
+# The following lines are desirable for IPv4 capable hosts
+127.0.0.1 {{fqdn}} {{hostname}}
+127.0.0.1 localhost.localdomain localhost
+127.0.0.1 localhost4.localdomain4 localhost4
diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py
index f04d0cd4..e45ad947 100644
--- a/tests/cloud_tests/bddeb.py
+++ b/tests/cloud_tests/bddeb.py
@@ -6,7 +6,7 @@ from functools import partial
import os
import tempfile
-from cloudinit import util as c_util
+from cloudinit import subp
from tests.cloud_tests import (config, LOG)
from tests.cloud_tests import platforms
from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single)
@@ -42,8 +42,8 @@ def build_deb(args, instance):
'GIT_WORK_TREE': extract_dir}
LOG.debug('creating tarball of cloud-init at: %s', local_tarball)
- c_util.subp(['tar', 'cf', local_tarball, '--owner', 'root',
- '--group', 'root', '-C', args.cloud_init, '.'])
+ subp.subp(['tar', 'cf', local_tarball, '--owner', 'root',
+ '--group', 'root', '-C', args.cloud_init, '.'])
LOG.debug('copying to remote system at: %s', remote_tarball)
instance.push_file(local_tarball, remote_tarball)
@@ -55,7 +55,7 @@ def build_deb(args, instance):
LOG.debug('installing deps')
deps_path = os.path.join(extract_dir, 'tools', 'read-dependencies')
instance.execute([deps_path, '--install', '--test-distro',
- '--distro', 'ubuntu', '--python-version', '3'])
+ '--distro', 'ubuntu'])
LOG.debug('building deb in remote system at: %s', output_link)
bddeb_args = args.bddeb_args.split() if args.bddeb_args else []
diff --git a/tests/cloud_tests/platforms/lxd/image.py b/tests/cloud_tests/platforms/lxd/image.py
index b5de1f52..a88b47f3 100644
--- a/tests/cloud_tests/platforms/lxd/image.py
+++ b/tests/cloud_tests/platforms/lxd/image.py
@@ -8,6 +8,7 @@ import tempfile
from ..images import Image
from .snapshot import LXDSnapshot
+from cloudinit import subp
from cloudinit import util as c_util
from tests.cloud_tests import util
@@ -75,19 +76,36 @@ class LXDImage(Image):
}
def export_image(self, output_dir):
- """Export image from lxd image store to (split) tarball on disk.
+ """Export image from lxd image store to disk.
- @param output_dir: dir to store tarballs in
- @return_value: tuple of path to metadata tarball and rootfs tarball
+ @param output_dir: dir to store the exported image in
+ @return_value: tuple of path to metadata tarball and rootfs
+
+ Only the "split" image format with separate rootfs and metadata
+ files is supported, e.g:
+
+ 71f171df[...]cd31.squashfs (could also be: .tar.xz or .tar.gz)
+ meta-71f171df[...]cd31.tar.xz
+
+ Combined images made by a single tarball are not supported.
"""
# pylxd's image export feature doesn't do split exports, so use cmdline
- c_util.subp(['lxc', 'image', 'export', self.pylxd_image.fingerprint,
- output_dir], capture=True)
- tarballs = [p for p in os.listdir(output_dir) if p.endswith('tar.xz')]
+ fp = self.pylxd_image.fingerprint
+ subp.subp(['lxc', 'image', 'export', fp, output_dir], capture=True)
+ image_files = [p for p in os.listdir(output_dir) if fp in p]
+
+ if len(image_files) != 2:
+ raise NotImplementedError(
+ "Image %s has unsupported format. "
+ "Expected 2 files, found %d: %s."
+ % (fp, len(image_files), ', '.join(image_files)))
+
metadata = os.path.join(
- output_dir, next(p for p in tarballs if p.startswith('meta-')))
+ output_dir,
+ next(p for p in image_files if p.startswith('meta-')))
rootfs = os.path.join(
- output_dir, next(p for p in tarballs if not p.startswith('meta-')))
+ output_dir,
+ next(p for p in image_files if not p.startswith('meta-')))
return (metadata, rootfs)
def import_image(self, metadata, rootfs):
@@ -101,8 +119,8 @@ class LXDImage(Image):
"""
alias = util.gen_instance_name(
image_desc=str(self), use_desc='update-metadata')
- c_util.subp(['lxc', 'image', 'import', metadata, rootfs,
- '--alias', alias], capture=True)
+ subp.subp(['lxc', 'image', 'import', metadata, rootfs,
+ '--alias', alias], capture=True)
self.pylxd_image = self.platform.query_image_by_alias(alias)
return self.pylxd_image.fingerprint
diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py
index 2b804a62..b27b9848 100644
--- a/tests/cloud_tests/platforms/lxd/instance.py
+++ b/tests/cloud_tests/platforms/lxd/instance.py
@@ -7,7 +7,8 @@ import shutil
import time
from tempfile import mkdtemp
-from cloudinit.util import load_yaml, subp, ProcessExecutionError, which
+from cloudinit.subp import subp, ProcessExecutionError, which
+from cloudinit.util import load_yaml
from tests.cloud_tests import LOG
from tests.cloud_tests.util import PlatformError
diff --git a/tests/cloud_tests/platforms/nocloudkvm/image.py b/tests/cloud_tests/platforms/nocloudkvm/image.py
index bc2b6e75..ff5b6ad7 100644
--- a/tests/cloud_tests/platforms/nocloudkvm/image.py
+++ b/tests/cloud_tests/platforms/nocloudkvm/image.py
@@ -2,7 +2,7 @@
"""NoCloud KVM Image Base Class."""
-from cloudinit import util as c_util
+from cloudinit import subp
import os
import shutil
@@ -30,8 +30,8 @@ class NoCloudKVMImage(Image):
self._img_path = os.path.join(self._workd,
os.path.basename(self._orig_img_path))
- c_util.subp(['qemu-img', 'create', '-f', 'qcow2',
- '-b', orig_img_path, self._img_path])
+ subp.subp(['qemu-img', 'create', '-f', 'qcow2',
+ '-b', orig_img_path, self._img_path])
super(NoCloudKVMImage, self).__init__(platform, config)
@@ -50,10 +50,10 @@ class NoCloudKVMImage(Image):
'--system-resolvconf', self._img_path,
'--', 'chroot', '_MOUNTPOINT_']
try:
- out, err = c_util.subp(mic_chroot + env_args + list(command),
- data=stdin, decode=False)
+ out, err = subp.subp(mic_chroot + env_args + list(command),
+ data=stdin, decode=False)
return (out, err, 0)
- except c_util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
return (e.stdout, e.stderr, e.exit_code)
def snapshot(self):
diff --git a/tests/cloud_tests/platforms/nocloudkvm/instance.py b/tests/cloud_tests/platforms/nocloudkvm/instance.py
index 96185b75..5140a11c 100644
--- a/tests/cloud_tests/platforms/nocloudkvm/instance.py
+++ b/tests/cloud_tests/platforms/nocloudkvm/instance.py
@@ -11,7 +11,7 @@ import uuid
from ..instances import Instance
from cloudinit.atomic_helper import write_json
-from cloudinit import util as c_util
+from cloudinit import subp
from tests.cloud_tests import LOG, util
# This domain contains reverse lookups for hostnames that are used.
@@ -110,8 +110,8 @@ class NoCloudKVMInstance(Instance):
"""Clean up instance."""
if self.pid:
try:
- c_util.subp(['kill', '-9', self.pid])
- except c_util.ProcessExecutionError:
+ subp.subp(['kill', '-9', self.pid])
+ except subp.ProcessExecutionError:
pass
if self.pid_file:
@@ -143,8 +143,8 @@ class NoCloudKVMInstance(Instance):
# meta-data can be yaml, but more easily pretty printed with json
write_json(meta_data_file, self.meta_data)
- c_util.subp(['cloud-localds', seed_file, user_data_file,
- meta_data_file])
+ subp.subp(['cloud-localds', seed_file, user_data_file,
+ meta_data_file])
return seed_file
diff --git a/tests/cloud_tests/platforms/nocloudkvm/platform.py b/tests/cloud_tests/platforms/nocloudkvm/platform.py
index 2d1480f5..53c8ebf2 100644
--- a/tests/cloud_tests/platforms/nocloudkvm/platform.py
+++ b/tests/cloud_tests/platforms/nocloudkvm/platform.py
@@ -12,6 +12,7 @@ from simplestreams import util as s_util
from ..platforms import Platform
from .image import NoCloudKVMImage
from .instance import NoCloudKVMInstance
+from cloudinit import subp
from cloudinit import util as c_util
from tests.cloud_tests import util
@@ -84,8 +85,8 @@ class NoCloudKVMPlatform(Platform):
"""
name = util.gen_instance_name(image_desc=image_desc, use_desc=use_desc)
img_path = os.path.join(self.config['data_dir'], name + '.qcow2')
- c_util.subp(['qemu-img', 'create', '-f', 'qcow2',
- '-b', src_img_path, img_path])
+ subp.subp(['qemu-img', 'create', '-f', 'qcow2',
+ '-b', src_img_path, img_path])
return NoCloudKVMInstance(self, name, img_path, properties, config,
features, user_data, meta_data)
diff --git a/tests/cloud_tests/platforms/platforms.py b/tests/cloud_tests/platforms/platforms.py
index bebdf1c6..58f65e52 100644
--- a/tests/cloud_tests/platforms/platforms.py
+++ b/tests/cloud_tests/platforms/platforms.py
@@ -7,6 +7,7 @@ import shutil
from simplestreams import filters, mirrors
from simplestreams import util as s_util
+from cloudinit import subp
from cloudinit import util as c_util
from tests.cloud_tests import util
@@ -48,10 +49,10 @@ class Platform(object):
if os.path.exists(filename):
c_util.del_file(filename)
- c_util.subp(['ssh-keygen', '-m', 'PEM', '-t', 'rsa', '-b', '4096',
- '-f', filename, '-P', '',
- '-C', 'ubuntu@cloud_test'],
- capture=True)
+ subp.subp(['ssh-keygen', '-m', 'PEM', '-t', 'rsa', '-b', '4096',
+ '-f', filename, '-P', '',
+ '-C', 'ubuntu@cloud_test'],
+ capture=True)
@staticmethod
def _query_streams(img_conf, img_filter):
diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py
index 68d59111..2e7c6686 100644
--- a/tests/cloud_tests/testcases/base.py
+++ b/tests/cloud_tests/testcases/base.py
@@ -321,7 +321,7 @@ class CloudTestCase(unittest.TestCase):
"Unexpected sys_info dist value")
self.assertEqual(self.os_name, v1_data['distro_release'])
self.assertEqual(
- str(self.os_cfg['version']), v1_data['distro_version'])
+ str(self.os_cfg['version']), v1_data['distro_version'])
self.assertEqual('x86_64', v1_data['machine'])
self.assertIsNotNone(
re.match(r'3.\d\.\d', v1_data['python_version']),
diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py
index e65771b1..7dcccbdd 100644
--- a/tests/cloud_tests/util.py
+++ b/tests/cloud_tests/util.py
@@ -17,6 +17,7 @@ import time
import yaml
from contextlib import contextmanager
+from cloudinit import subp
from cloudinit import util as c_util
from tests.cloud_tests import LOG
@@ -232,8 +233,8 @@ def flat_tar(output, basedir, owner='root', group='root'):
@param group: group archive files belong to
@return_value: none
"""
- c_util.subp(['tar', 'cf', output, '--owner', owner, '--group', group,
- '-C', basedir] + rel_files(basedir), capture=True)
+ subp.subp(['tar', 'cf', output, '--owner', owner, '--group', group,
+ '-C', basedir] + rel_files(basedir), capture=True)
def parse_conf_list(entries, valid=None, boolean=False):
@@ -465,7 +466,7 @@ class TargetBase(object):
return path
-class InTargetExecuteError(c_util.ProcessExecutionError):
+class InTargetExecuteError(subp.ProcessExecutionError):
"""Error type for in target commands that fail."""
default_desc = 'Unexpected error while running command.'
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index 9045e743..c5675249 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -15,6 +15,7 @@ from cloudinit.tests.helpers import (
from cloudinit import handlers
from cloudinit import helpers
+from cloudinit import subp
from cloudinit import util
from cloudinit.handlers.cloud_config import CloudConfigPartHandler
@@ -66,7 +67,7 @@ class TestUpstartJobPartHandler(FilesystemMockingTestCase):
util.ensure_dir("/etc/upstart")
with mock.patch(self.mpath + 'SUITABLE_UPSTART', return_value=True):
- with mock.patch.object(util, 'subp') as m_subp:
+ with mock.patch.object(subp, 'subp') as m_subp:
h = UpstartJobPartHandler(paths)
h.handle_part('', handlers.CONTENT_START,
None, None, None)
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index a4261609..7fb9c3ab 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -639,6 +639,31 @@ class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase):
self.reRoot()
ci = stages.Init()
ci.datasource = FakeDataSource(blob)
+ ci.fetch()
+ with self.assertRaises(Exception) as context:
+ ci.consume_data()
+ self.assertIn('403', str(context.exception))
+
+ with self.assertRaises(FileNotFoundError):
+ util.load_file(ci.paths.get_ipath("cloud_config"))
+
+ @mock.patch('cloudinit.url_helper.time.sleep')
+ @mock.patch('cloudinit.features.ERROR_ON_USER_DATA_FAILURE', False)
+ def test_include_bad_url_no_fail(self, mock_sleep):
+ """Test #include with a bad URL and failure disabled"""
+ bad_url = 'http://bad/forbidden'
+ bad_data = '#cloud-config\nbad: true\n'
+ httpretty.register_uri(httpretty.GET, bad_url, bad_data, status=403)
+
+ included_url = 'http://hostname/path'
+ included_data = '#cloud-config\nincluded: true\n'
+ httpretty.register_uri(httpretty.GET, included_url, included_data)
+
+ blob = '#include\n%s\n%s' % (bad_url, included_url)
+
+ self.reRoot()
+ ci = stages.Init()
+ ci.datasource = FakeDataSource(blob)
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py
index 1e66fcdb..b626229e 100644
--- a/tests/unittests/test_datasource/test_aliyun.py
+++ b/tests/unittests/test_datasource/test_aliyun.py
@@ -143,7 +143,7 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
self.assertEqual('aliyun', self.ds.cloud_name)
self.assertEqual('ec2', self.ds.platform)
self.assertEqual(
- 'metadata (http://100.100.100.200)', self.ds.subplatform)
+ 'metadata (http://100.100.100.200)', self.ds.subplatform)
@mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
def test_returns_false_when_not_on_aliyun(self, m_is_aliyun):
diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py
index 3119bfac..fc59d1d5 100644
--- a/tests/unittests/test_datasource/test_altcloud.py
+++ b/tests/unittests/test_datasource/test_altcloud.py
@@ -15,6 +15,7 @@ import shutil
import tempfile
from cloudinit import helpers
+from cloudinit import subp
from cloudinit import util
from cloudinit.tests.helpers import CiTestCase, mock
@@ -286,7 +287,7 @@ class TestUserDataRhevm(CiTestCase):
def test_modprobe_fails(self):
'''Test user_data_rhevm() where modprobe fails.'''
- self.m_modprobe_floppy.side_effect = util.ProcessExecutionError(
+ self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError(
"Failed modprobe")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
@@ -294,7 +295,7 @@ class TestUserDataRhevm(CiTestCase):
def test_no_modprobe_cmd(self):
'''Test user_data_rhevm() with no modprobe command.'''
- self.m_modprobe_floppy.side_effect = util.ProcessExecutionError(
+ self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError(
"No such file or dir")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
@@ -302,7 +303,7 @@ class TestUserDataRhevm(CiTestCase):
def test_udevadm_fails(self):
'''Test user_data_rhevm() where udevadm fails.'''
- self.m_udevadm_settle.side_effect = util.ProcessExecutionError(
+ self.m_udevadm_settle.side_effect = subp.ProcessExecutionError(
"Failed settle.")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 2f8561cb..a99cbd41 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -114,14 +114,14 @@ NETWORK_METADATA = {
"ipv4": {
"subnet": [
{
- "prefix": "24",
- "address": "10.0.0.0"
+ "prefix": "24",
+ "address": "10.0.0.0"
}
],
"ipAddress": [
{
- "privateIpAddress": "10.0.0.4",
- "publicIpAddress": "104.46.124.81"
+ "privateIpAddress": "10.0.0.4",
+ "publicIpAddress": "104.46.124.81"
}
]
}
@@ -491,7 +491,7 @@ scbus-1 on xpt0 bus 0
(dsaz, 'get_hostname', mock.MagicMock()),
(dsaz, 'set_hostname', mock.MagicMock()),
(dsaz, 'get_metadata_from_fabric', self.get_metadata_from_fabric),
- (dsaz.util, 'which', lambda x: True),
+ (dsaz.subp, 'which', lambda x: True),
(dsaz.util, 'read_dmi_data', mock.MagicMock(
side_effect=_dmi_mocks)),
(dsaz.util, 'wait_for_files', mock.MagicMock(
@@ -683,15 +683,17 @@ scbus-1 on xpt0 bus 0
'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
@mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
def test_crawl_metadata_on_reprovision_reports_ready(
- self, poll_imds_func,
- report_ready_func,
- m_write, m_dhcp):
+ self, poll_imds_func, report_ready_func, m_write, m_dhcp
+ ):
"""If reprovisioning, report ready at the end"""
ovfenv = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "True"})
+ platform_settings={"PreprovisionedVm": "True"}
+ )
- data = {'ovfcontent': ovfenv,
- 'sys_cfg': {}}
+ data = {
+ 'ovfcontent': ovfenv,
+ 'sys_cfg': {}
+ }
dsrc = self._get_ds(data)
poll_imds_func.return_value = ovfenv
dsrc.crawl_metadata()
@@ -706,15 +708,18 @@ scbus-1 on xpt0 bus 0
@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
@mock.patch('cloudinit.sources.DataSourceAzure.readurl')
def test_crawl_metadata_on_reprovision_reports_ready_using_lease(
- self, m_readurl, m_dhcp,
- m_net, report_ready_func,
- m_media_switch, m_write):
+ self, m_readurl, m_dhcp, m_net, report_ready_func,
+ m_media_switch, m_write
+ ):
"""If reprovisioning, report ready using the obtained lease"""
ovfenv = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "True"})
+ platform_settings={"PreprovisionedVm": "True"}
+ )
- data = {'ovfcontent': ovfenv,
- 'sys_cfg': {}}
+ data = {
+ 'ovfcontent': ovfenv,
+ 'sys_cfg': {}
+ }
dsrc = self._get_ds(data)
lease = {
@@ -1267,20 +1272,20 @@ scbus-1 on xpt0 bus 0
expected_config['config'].append(blacklist_config)
self.assertEqual(netconfig, expected_config)
- @mock.patch(MOCKPATH + 'util.subp')
- def test_get_hostname_with_no_args(self, subp):
+ @mock.patch(MOCKPATH + 'subp.subp')
+ def test_get_hostname_with_no_args(self, m_subp):
dsaz.get_hostname()
- subp.assert_called_once_with(("hostname",), capture=True)
+ m_subp.assert_called_once_with(("hostname",), capture=True)
- @mock.patch(MOCKPATH + 'util.subp')
- def test_get_hostname_with_string_arg(self, subp):
+ @mock.patch(MOCKPATH + 'subp.subp')
+ def test_get_hostname_with_string_arg(self, m_subp):
dsaz.get_hostname(hostname_command="hostname")
- subp.assert_called_once_with(("hostname",), capture=True)
+ m_subp.assert_called_once_with(("hostname",), capture=True)
- @mock.patch(MOCKPATH + 'util.subp')
- def test_get_hostname_with_iterable_arg(self, subp):
+ @mock.patch(MOCKPATH + 'subp.subp')
+ def test_get_hostname_with_iterable_arg(self, m_subp):
dsaz.get_hostname(hostname_command=("hostname",))
- subp.assert_called_once_with(("hostname",), capture=True)
+ m_subp.assert_called_once_with(("hostname",), capture=True)
class TestAzureBounce(CiTestCase):
@@ -1302,7 +1307,7 @@ class TestAzureBounce(CiTestCase):
mock.patch.object(dsaz, 'get_metadata_from_imds',
mock.MagicMock(return_value={})))
self.patches.enter_context(
- mock.patch.object(dsaz.util, 'which', lambda x: True))
+ mock.patch.object(dsaz.subp, 'which', lambda x: True))
self.patches.enter_context(mock.patch.object(
dsaz, '_get_random_seed', return_value='wild'))
@@ -1331,7 +1336,7 @@ class TestAzureBounce(CiTestCase):
self.set_hostname = self.patches.enter_context(
mock.patch.object(dsaz, 'set_hostname'))
self.subp = self.patches.enter_context(
- mock.patch(MOCKPATH + 'util.subp'))
+ mock.patch(MOCKPATH + 'subp.subp'))
self.find_fallback_nic = self.patches.enter_context(
mock.patch('cloudinit.net.find_fallback_nic', return_value='eth9'))
@@ -1414,7 +1419,7 @@ class TestAzureBounce(CiTestCase):
cfg = {'hostname_bounce': {'policy': 'force'}}
dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg),
agent_command=['not', '__builtin__'])
- patch_path = MOCKPATH + 'util.which'
+ patch_path = MOCKPATH + 'subp.which'
with mock.patch(patch_path) as m_which:
m_which.return_value = None
ret = self._get_and_setup(dsrc)
@@ -1951,11 +1956,12 @@ class TestPreprovisioningPollIMDS(CiTestCase):
self.tries += 1
if self.tries == 1:
raise requests.Timeout('Fake connection timeout')
- elif self.tries == 2:
+ elif self.tries in (2, 3):
response = requests.Response()
- response.status_code = 404
+ response.status_code = 404 if self.tries == 2 else 410
raise requests.exceptions.HTTPError(
- "fake 404", response=response)
+ "fake {}".format(response.status_code), response=response
+ )
# Third try should succeed and stop retries or redhcp
return mock.MagicMock(status_code=200, text="good", content="good")
@@ -1967,7 +1973,7 @@ class TestPreprovisioningPollIMDS(CiTestCase):
self.assertEqual(report_ready_func.call_count, 1)
report_ready_func.assert_called_with(lease=lease)
self.assertEqual(3, m_dhcpv4.call_count, 'Expected 3 DHCP calls')
- self.assertEqual(3, self.tries, 'Expected 3 total reads from IMDS')
+ self.assertEqual(4, self.tries, 'Expected 4 total reads from IMDS')
def test_poll_imds_report_ready_false(self,
report_ready_func, fake_resp,
@@ -1987,7 +1993,7 @@ class TestPreprovisioningPollIMDS(CiTestCase):
self.assertEqual(report_ready_func.call_count, 0)
-@mock.patch(MOCKPATH + 'util.subp')
+@mock.patch(MOCKPATH + 'subp.subp')
@mock.patch(MOCKPATH + 'util.write_file')
@mock.patch(MOCKPATH + 'util.is_FreeBSD')
@mock.patch('cloudinit.sources.helpers.netlink.'
@@ -2158,7 +2164,7 @@ class TestWBIsPlatformViable(CiTestCase):
{'os.path.exists': False,
# Non-matching Azure chassis-asset-tag
'util.read_dmi_data': dsaz.AZURE_CHASSIS_ASSET_TAG + 'X',
- 'util.which': None},
+ 'subp.which': None},
dsaz._is_platform_viable, 'doesnotmatter'))
self.assertIn(
"DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format(
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index 6344b0bb..71ef57f0 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -287,7 +287,7 @@ class TestOpenSSLManager(CiTestCase):
self.addCleanup(patches.close)
self.subp = patches.enter_context(
- mock.patch.object(azure_helper.util, 'subp'))
+ mock.patch.object(azure_helper.subp, 'subp'))
try:
self.open = patches.enter_context(
mock.patch('__builtin__.open'))
diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py
index 83c2f753..e68168f2 100644
--- a/tests/unittests/test_datasource/test_cloudstack.py
+++ b/tests/unittests/test_datasource/test_cloudstack.py
@@ -41,7 +41,7 @@ class TestCloudStackPasswordFetching(CiTestCase):
def _set_password_server_response(self, response_string):
subp = mock.MagicMock(return_value=(response_string, ''))
self.patches.enter_context(
- mock.patch('cloudinit.sources.DataSourceCloudStack.util.subp',
+ mock.patch('cloudinit.sources.DataSourceCloudStack.subp.subp',
subp))
return subp
diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py
index ad1ea595..a93f2195 100644
--- a/tests/unittests/test_datasource/test_ec2.py
+++ b/tests/unittests/test_datasource/test_ec2.py
@@ -576,7 +576,8 @@ class TestEc2(test_helpers.HttprettyTestCase):
md=None)
conn_error = requests.exceptions.ConnectionError(
- '[Errno 113] no route to host')
+ '[Errno 113] no route to host'
+ )
mock_success = mock.MagicMock(contents=b'fakesuccess')
mock_success.ok.return_value = True
@@ -743,7 +744,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
ret = ds.get_data()
self.assertTrue(ret)
- m_dhcp.assert_called_once_with('eth9')
+ m_dhcp.assert_called_once_with('eth9', None)
m_net.assert_called_once_with(
broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
prefix_or_mask='255.255.255.0', router='192.168.2.1',
@@ -777,12 +778,12 @@ class TestGetSecondaryAddresses(test_helpers.CiTestCase):
'2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
ec2.get_secondary_addresses(invalid_cidr_md, self.mac))
expected_logs = [
- "WARNING: Could not parse subnet-ipv4-cidr-block"
- " something-unexpected for mac 06:17:04:d7:26:ff."
- " ipv4 network config prefix defaults to /24",
- "WARNING: Could not parse subnet-ipv6-cidr-block"
- " not/sure/what/this/is for mac 06:17:04:d7:26:ff."
- " ipv6 network config prefix defaults to /128"
+ "WARNING: Could not parse subnet-ipv4-cidr-block"
+ " something-unexpected for mac 06:17:04:d7:26:ff."
+ " ipv4 network config prefix defaults to /24",
+ "WARNING: Could not parse subnet-ipv6-cidr-block"
+ " not/sure/what/this/is for mac 06:17:04:d7:26:ff."
+ " ipv6 network config prefix defaults to /128"
]
logs = self.logs.getvalue()
for log in expected_logs:
diff --git a/tests/unittests/test_datasource/test_hetzner.py b/tests/unittests/test_datasource/test_hetzner.py
index a9c12597..d0879545 100644
--- a/tests/unittests/test_datasource/test_hetzner.py
+++ b/tests/unittests/test_datasource/test_hetzner.py
@@ -5,10 +5,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit.sources import DataSourceHetzner
+import cloudinit.sources.helpers.hetzner as hc_helper
from cloudinit import util, settings, helpers
from cloudinit.tests.helpers import mock, CiTestCase
+import base64
+import pytest
+
METADATA = util.load_yaml("""
hostname: cloudinit-test
instance-id: 123456
@@ -115,3 +119,22 @@ class TestDataSourceHetzner(CiTestCase):
# These are a white box attempt to ensure it did not search.
m_find_fallback.assert_not_called()
m_read_md.assert_not_called()
+
+
+class TestMaybeB64Decode:
+ """Test the maybe_b64decode helper function."""
+
+ @pytest.mark.parametrize("invalid_input", (str("not bytes"), int(4)))
+ def test_raises_error_on_non_bytes(self, invalid_input):
+ """maybe_b64decode should raise error if data is not bytes."""
+ with pytest.raises(TypeError):
+ hc_helper.maybe_b64decode(invalid_input)
+
+ @pytest.mark.parametrize("in_data,expected", [
+ # If data is not b64 encoded, then return value should be the same.
+ (b"this is my data", b"this is my data"),
+ # If data is b64 encoded, then return value should be decoded.
+ (base64.b64encode(b"data"), b"data"),
+ ])
+ def test_happy_path(self, in_data, expected):
+ assert expected == hc_helper.maybe_b64decode(in_data)
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
index de896a9e..9c6070a5 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -9,6 +9,8 @@ import os
import pwd
import unittest
+import pytest
+
TEST_VARS = {
'VAR1': 'single',
@@ -130,18 +132,18 @@ class TestOpenNebulaDataSource(CiTestCase):
def test_seed_dir_non_contextdisk(self):
self.assertRaises(ds.NonContextDiskDir, ds.read_context_disk_dir,
- self.seed_dir)
+ self.seed_dir, mock.Mock())
def test_seed_dir_empty1_context(self):
populate_dir(self.seed_dir, {'context.sh': ''})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertIsNone(results['userdata'])
self.assertEqual(results['metadata'], {})
def test_seed_dir_empty2_context(self):
populate_context_dir(self.seed_dir, {})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertIsNone(results['userdata'])
self.assertEqual(results['metadata'], {})
@@ -151,11 +153,11 @@ class TestOpenNebulaDataSource(CiTestCase):
self.assertRaises(ds.BrokenContextDiskDir,
ds.read_context_disk_dir,
- self.seed_dir)
+ self.seed_dir, mock.Mock())
def test_context_parser(self):
populate_context_dir(self.seed_dir, TEST_VARS)
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('metadata' in results)
self.assertEqual(TEST_VARS, results['metadata'])
@@ -166,7 +168,7 @@ class TestOpenNebulaDataSource(CiTestCase):
for k in ('SSH_KEY', 'SSH_PUBLIC_KEY'):
my_d = os.path.join(self.tmp, "%s-%i" % (k, c))
populate_context_dir(my_d, {k: '\n'.join(public_keys)})
- results = ds.read_context_disk_dir(my_d)
+ results = ds.read_context_disk_dir(my_d, mock.Mock())
self.assertTrue('metadata' in results)
self.assertTrue('public-keys' in results['metadata'])
@@ -180,7 +182,7 @@ class TestOpenNebulaDataSource(CiTestCase):
my_d = os.path.join(self.tmp, k)
populate_context_dir(my_d, {k: USER_DATA,
'USERDATA_ENCODING': ''})
- results = ds.read_context_disk_dir(my_d)
+ results = ds.read_context_disk_dir(my_d, mock.Mock())
self.assertTrue('userdata' in results)
self.assertEqual(USER_DATA, results['userdata'])
@@ -190,7 +192,7 @@ class TestOpenNebulaDataSource(CiTestCase):
for k in ('USER_DATA', 'USERDATA'):
my_d = os.path.join(self.tmp, k)
populate_context_dir(my_d, {k: b64userdata})
- results = ds.read_context_disk_dir(my_d)
+ results = ds.read_context_disk_dir(my_d, mock.Mock())
self.assertTrue('userdata' in results)
self.assertEqual(b64userdata, results['userdata'])
@@ -200,7 +202,7 @@ class TestOpenNebulaDataSource(CiTestCase):
my_d = os.path.join(self.tmp, k)
populate_context_dir(my_d, {k: util.b64e(USER_DATA),
'USERDATA_ENCODING': 'base64'})
- results = ds.read_context_disk_dir(my_d)
+ results = ds.read_context_disk_dir(my_d, mock.Mock())
self.assertTrue('userdata' in results)
self.assertEqual(USER_DATA, results['userdata'])
@@ -212,7 +214,7 @@ class TestOpenNebulaDataSource(CiTestCase):
for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
my_d = os.path.join(self.tmp, k)
populate_context_dir(my_d, {k: PUBLIC_IP})
- results = ds.read_context_disk_dir(my_d)
+ results = ds.read_context_disk_dir(my_d, mock.Mock())
self.assertTrue('metadata' in results)
self.assertTrue('local-hostname' in results['metadata'])
@@ -227,7 +229,7 @@ class TestOpenNebulaDataSource(CiTestCase):
# without ETH0_MAC
# for Older OpenNebula?
populate_context_dir(self.seed_dir, {'ETH0_IP': IP_BY_MACADDR})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -237,7 +239,7 @@ class TestOpenNebulaDataSource(CiTestCase):
# ETH0_IP and ETH0_MAC
populate_context_dir(
self.seed_dir, {'ETH0_IP': IP_BY_MACADDR, 'ETH0_MAC': MACADDR})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -249,7 +251,7 @@ class TestOpenNebulaDataSource(CiTestCase):
# "AR = [ TYPE = ETHER ]"
populate_context_dir(
self.seed_dir, {'ETH0_IP': '', 'ETH0_MAC': MACADDR})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -263,7 +265,7 @@ class TestOpenNebulaDataSource(CiTestCase):
'ETH0_MAC': MACADDR,
'ETH0_MASK': '255.255.0.0'
})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -277,7 +279,7 @@ class TestOpenNebulaDataSource(CiTestCase):
'ETH0_MAC': MACADDR,
'ETH0_MASK': ''
})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -290,7 +292,7 @@ class TestOpenNebulaDataSource(CiTestCase):
'ETH0_IP6': IP6_GLOBAL,
'ETH0_MAC': MACADDR,
})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -303,7 +305,7 @@ class TestOpenNebulaDataSource(CiTestCase):
'ETH0_IP6_ULA': IP6_ULA,
'ETH0_MAC': MACADDR,
})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -317,7 +319,7 @@ class TestOpenNebulaDataSource(CiTestCase):
'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX,
'ETH0_MAC': MACADDR,
})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -331,7 +333,7 @@ class TestOpenNebulaDataSource(CiTestCase):
'ETH0_IP6_PREFIX_LENGTH': '',
'ETH0_MAC': MACADDR,
})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -368,7 +370,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
expected = {
'02:00:0a:12:01:01': 'ETH0',
'02:00:0a:12:0f:0f': 'ETH1', }
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(expected, net.context_devname)
def test_get_nameservers(self):
@@ -383,21 +385,21 @@ class TestOpenNebulaNetwork(unittest.TestCase):
expected = {
'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
'search': ['example.com', 'example.org']}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_nameservers('eth0')
self.assertEqual(expected, val)
def test_get_mtu(self):
"""Verify get_mtu('device') correctly returns MTU size."""
context = {'ETH0_MTU': '1280'}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_mtu('eth0')
self.assertEqual('1280', val)
def test_get_ip(self):
"""Verify get_ip('device') correctly returns IPv4 address."""
context = {'ETH0_IP': PUBLIC_IP}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_ip('eth0', MACADDR)
self.assertEqual(PUBLIC_IP, val)
@@ -408,7 +410,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
string.
"""
context = {'ETH0_IP': ''}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_ip('eth0', MACADDR)
self.assertEqual(IP_BY_MACADDR, val)
@@ -421,7 +423,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'ETH0_IP6': IP6_GLOBAL,
'ETH0_IP6_ULA': '', }
expected = [IP6_GLOBAL]
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_ip6('eth0')
self.assertEqual(expected, val)
@@ -434,7 +436,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'ETH0_IP6': '',
'ETH0_IP6_ULA': IP6_ULA, }
expected = [IP6_ULA]
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_ip6('eth0')
self.assertEqual(expected, val)
@@ -447,7 +449,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'ETH0_IP6': IP6_GLOBAL,
'ETH0_IP6_ULA': IP6_ULA, }
expected = [IP6_GLOBAL, IP6_ULA]
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_ip6('eth0')
self.assertEqual(expected, val)
@@ -456,7 +458,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
Verify get_ip6_prefix('device') correctly returns IPv6 prefix.
"""
context = {'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_ip6_prefix('eth0')
self.assertEqual(IP6_PREFIX, val)
@@ -467,7 +469,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
string.
"""
context = {'ETH0_IP6_PREFIX_LENGTH': ''}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_ip6_prefix('eth0')
self.assertEqual('64', val)
@@ -477,7 +479,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
address.
"""
context = {'ETH0_GATEWAY': '1.2.3.5'}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_gateway('eth0')
self.assertEqual('1.2.3.5', val)
@@ -487,7 +489,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
address.
"""
context = {'ETH0_GATEWAY6': IP6_GW}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_gateway6('eth0')
self.assertEqual(IP6_GW, val)
@@ -496,7 +498,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
Verify get_mask('device') correctly returns IPv4 subnet mask.
"""
context = {'ETH0_MASK': '255.255.0.0'}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_mask('eth0')
self.assertEqual('255.255.0.0', val)
@@ -506,7 +508,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
It returns default value '255.255.255.0' if ETH0_MASK has empty string.
"""
context = {'ETH0_MASK': ''}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_mask('eth0')
self.assertEqual('255.255.255.0', val)
@@ -515,7 +517,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
Verify get_network('device') correctly returns IPv4 network address.
"""
context = {'ETH0_NETWORK': '1.2.3.0'}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_network('eth0', MACADDR)
self.assertEqual('1.2.3.0', val)
@@ -526,7 +528,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
empty string.
"""
context = {'ETH0_NETWORK': ''}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_network('eth0', MACADDR)
self.assertEqual('10.18.1.0', val)
@@ -535,7 +537,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
Verify get_field('device', 'name') returns *context* value.
"""
context = {'ETH9_DUMMY': 'DUMMY_VALUE'}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_field('eth9', 'dummy')
self.assertEqual('DUMMY_VALUE', val)
@@ -545,7 +547,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
value.
"""
context = {'ETH9_DUMMY': 'DUMMY_VALUE'}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE')
self.assertEqual('DUMMY_VALUE', val)
@@ -555,7 +557,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
value if context value is empty string.
"""
context = {'ETH9_DUMMY': ''}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE')
self.assertEqual('DEFAULT_VALUE', val)
@@ -565,7 +567,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
empty string.
"""
context = {'ETH9_DUMMY': ''}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_field('eth9', 'dummy')
self.assertEqual(None, val)
@@ -575,7 +577,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
None.
"""
context = {'ETH9_DUMMY': None}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_field('eth9', 'dummy')
self.assertEqual(None, val)
@@ -595,7 +597,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_GATEWAY
@@ -611,7 +613,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
@@ -630,7 +632,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_GATEWAY6
@@ -646,7 +648,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
@@ -667,7 +669,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH
@@ -687,7 +689,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
IP6_GLOBAL + '/' + IP6_PREFIX,
IP6_ULA + '/' + IP6_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
@@ -708,7 +710,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN
@@ -728,7 +730,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
@@ -747,7 +749,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_MTU
@@ -763,14 +765,14 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
def test_eth0(self, m_get_phys_by_mac):
for nic in self.system_nics:
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork({})
+ net = ds.OpenNebulaNetwork({}, mock.Mock())
expected = {
'version': 2,
'ethernets': {
@@ -780,6 +782,14 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.assertEqual(net.gen_conf(), expected)
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_distro_passed_through(self, m_get_physical_nics_by_mac):
+ ds.OpenNebulaNetwork({}, mock.sentinel.distro)
+ self.assertEqual(
+ [mock.call(mock.sentinel.distro)],
+ m_get_physical_nics_by_mac.call_args_list,
+ )
+
def test_eth0_override(self):
self.maxDiff = None
context = {
@@ -798,7 +808,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'ETH0_SEARCH_DOMAIN': '',
}
for nic in self.system_nics:
- net = ds.OpenNebulaNetwork(context,
+ net = ds.OpenNebulaNetwork(context, mock.Mock(),
system_nics_by_mac={MACADDR: nic})
expected = {
'version': 2,
@@ -830,7 +840,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'ETH0_SEARCH_DOMAIN': 'example.com example.org',
}
for nic in self.system_nics:
- net = ds.OpenNebulaNetwork(context,
+ net = ds.OpenNebulaNetwork(context, mock.Mock(),
system_nics_by_mac={MACADDR: nic})
expected = {
@@ -884,7 +894,10 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'ETH3_SEARCH_DOMAIN': 'third.example.com third.example.org',
}
net = ds.OpenNebulaNetwork(
- context, system_nics_by_mac={MAC_1: 'enp0s25', MAC_2: 'enp1s2'})
+ context,
+ mock.Mock(),
+ system_nics_by_mac={MAC_1: 'enp0s25', MAC_2: 'enp1s2'}
+ )
expected = {
'version': 2,
@@ -914,12 +927,43 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.assertEqual(expected, net.gen_conf())
-class TestParseShellConfig(unittest.TestCase):
+class TestParseShellConfig:
+ @pytest.mark.allow_subp_for("bash")
def test_no_seconds(self):
cfg = '\n'.join(["foo=bar", "SECONDS=2", "xx=foo"])
# we could test 'sleep 2', but that would make the test run slower.
ret = ds.parse_shell_config(cfg)
- self.assertEqual(ret, {"foo": "bar", "xx": "foo"})
+ assert ret == {"foo": "bar", "xx": "foo"}
+
+
+class TestGetPhysicalNicsByMac:
+ @pytest.mark.parametrize(
+ "interfaces_by_mac,physical_devs,expected_return",
+ [
+ # No interfaces => empty return
+ ({}, [], {}),
+ # Only virtual interface => empty return
+ ({"mac1": "virtual0"}, [], {}),
+ # Only physical interface => it is returned
+ ({"mac2": "physical0"}, ["physical0"], {"mac2": "physical0"}),
+ # Combination of physical and virtual => only physical returned
+ (
+ {"mac3": "physical1", "mac4": "virtual1"},
+ ["physical1"],
+ {"mac3": "physical1"},
+ ),
+ ],
+ )
+ def test(self, interfaces_by_mac, physical_devs, expected_return):
+ distro = mock.Mock()
+ distro.networking.is_physical.side_effect = (
+ lambda devname: devname in physical_devs
+ )
+ with mock.patch(
+ DS_PATH + ".net.get_interfaces_by_mac",
+ return_value=interfaces_by_mac,
+ ):
+ assert expected_return == ds.get_physical_nics_by_mac(distro)
def populate_context_dir(path, variables):
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index b534457c..3cfba74d 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -279,7 +279,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertEqual(2, len(ds_os_local.files))
self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure)
self.assertIsNone(ds_os_local.vendordata_raw)
- m_dhcp.assert_called_with('eth9')
+ m_dhcp.assert_called_with('eth9', None)
def test_bad_datasource_meta(self):
os_files = copy.deepcopy(OS_FILES)
diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py
index 486a2345..3ef7a4b2 100644
--- a/tests/unittests/test_datasource/test_ovf.py
+++ b/tests/unittests/test_datasource/test_ovf.py
@@ -10,6 +10,7 @@ import os
from collections import OrderedDict
from textwrap import dedent
+from cloudinit import subp
from cloudinit import util
from cloudinit.tests.helpers import CiTestCase, mock, wrap_and_call
from cloudinit.helpers import Paths
@@ -401,8 +402,8 @@ class TestTransportIso9660(CiTestCase):
self.assertTrue(dsovf.maybe_cdrom_device('xvdza1'))
-@mock.patch(MPATH + "util.which")
-@mock.patch(MPATH + "util.subp")
+@mock.patch(MPATH + "subp.which")
+@mock.patch(MPATH + "subp.subp")
class TestTransportVmwareGuestinfo(CiTestCase):
"""Test the com.vmware.guestInfo transport implemented in
transport_vmware_guestinfo."""
@@ -420,7 +421,7 @@ class TestTransportVmwareGuestinfo(CiTestCase):
def test_notfound_on_exit_code_1(self, m_subp, m_which):
"""If vmware-rpctool exits 1, then must return not found."""
m_which.return_value = self.rpctool_path
- m_subp.side_effect = util.ProcessExecutionError(
+ m_subp.side_effect = subp.ProcessExecutionError(
stdout="", stderr="No value found", exit_code=1, cmd=["unused"])
self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
self.assertEqual(1, m_subp.call_count)
@@ -442,7 +443,7 @@ class TestTransportVmwareGuestinfo(CiTestCase):
def test_notfound_and_warns_on_unexpected_exit_code(self, m_subp, m_which):
"""If vmware-rpctool exits non zero or 1, warnings should be logged."""
m_which.return_value = self.rpctool_path
- m_subp.side_effect = util.ProcessExecutionError(
+ m_subp.side_effect = subp.ProcessExecutionError(
stdout=None, stderr="No value found", exit_code=2, cmd=["unused"])
self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
self.assertEqual(1, m_subp.call_count)
diff --git a/tests/unittests/test_datasource/test_rbx.py b/tests/unittests/test_datasource/test_rbx.py
index 553af62e..d017510e 100644
--- a/tests/unittests/test_datasource/test_rbx.py
+++ b/tests/unittests/test_datasource/test_rbx.py
@@ -4,7 +4,7 @@ from cloudinit import helpers
from cloudinit import distros
from cloudinit.sources import DataSourceRbxCloud as ds
from cloudinit.tests.helpers import mock, CiTestCase, populate_dir
-from cloudinit import util
+from cloudinit import subp
DS_PATH = "cloudinit.sources.DataSourceRbxCloud"
@@ -157,7 +157,7 @@ class TestRbxDataSource(CiTestCase):
expected
)
- @mock.patch(DS_PATH + '.util.subp')
+ @mock.patch(DS_PATH + '.subp.subp')
def test_gratuitous_arp_run_standard_arping(self, m_subp):
"""Test handle run arping & parameters."""
items = [
@@ -183,7 +183,7 @@ class TestRbxDataSource(CiTestCase):
], m_subp.call_args_list
)
- @mock.patch(DS_PATH + '.util.subp')
+ @mock.patch(DS_PATH + '.subp.subp')
def test_handle_rhel_like_arping(self, m_subp):
"""Test handle on RHEL-like distros."""
items = [
@@ -201,8 +201,8 @@ class TestRbxDataSource(CiTestCase):
)
@mock.patch(
- DS_PATH + '.util.subp',
- side_effect=util.ProcessExecutionError()
+ DS_PATH + '.subp.subp',
+ side_effect=subp.ProcessExecutionError()
)
def test_continue_on_arping_error(self, m_subp):
"""Continue when command error"""
diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py
index 1b4dd0ad..9d82bda9 100644
--- a/tests/unittests/test_datasource/test_scaleway.py
+++ b/tests/unittests/test_datasource/test_scaleway.py
@@ -353,12 +353,16 @@ class TestDataSourceScaleway(HttprettyTestCase):
self.datasource.metadata['ipv6'] = None
netcfg = self.datasource.network_config
- resp = {'version': 1,
- 'config': [{
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]}]
+ resp = {
+ 'version': 1,
+ 'config': [
+ {
+ 'type': 'physical',
+ 'name': 'ens2',
+ 'subnets': [{'type': 'dhcp4'}]
}
+ ]
+ }
self.assertEqual(netcfg, resp)
@mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
@@ -371,25 +375,32 @@ class TestDataSourceScaleway(HttprettyTestCase):
m_get_cmdline.return_value = 'scaleway'
fallback_nic.return_value = 'ens2'
self.datasource.metadata['ipv6'] = {
- 'address': '2000:abc:4444:9876::42:999',
- 'gateway': '2000:abc:4444:9876::42:000',
- 'netmask': '127',
- }
+ 'address': '2000:abc:4444:9876::42:999',
+ 'gateway': '2000:abc:4444:9876::42:000',
+ 'netmask': '127',
+ }
netcfg = self.datasource.network_config
- resp = {'version': 1,
- 'config': [{
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'},
- {'type': 'static',
- 'address': '2000:abc:4444:9876::42:999',
- 'gateway': '2000:abc:4444:9876::42:000',
- 'netmask': '127', }
- ]
-
- }]
+ resp = {
+ 'version': 1,
+ 'config': [
+ {
+ 'type': 'physical',
+ 'name': 'ens2',
+ 'subnets': [
+ {
+ 'type': 'dhcp4'
+ },
+ {
+ 'type': 'static',
+ 'address': '2000:abc:4444:9876::42:999',
+ 'gateway': '2000:abc:4444:9876::42:000',
+ 'netmask': '127',
+ }
+ ]
}
+ ]
+ }
self.assertEqual(netcfg, resp)
@mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
@@ -417,12 +428,16 @@ class TestDataSourceScaleway(HttprettyTestCase):
self.datasource.metadata['ipv6'] = None
self.datasource._network_config = sources.UNSET
- resp = {'version': 1,
- 'config': [{
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]}]
+ resp = {
+ 'version': 1,
+ 'config': [
+ {
+ 'type': 'physical',
+ 'name': 'ens2',
+ 'subnets': [{'type': 'dhcp4'}]
}
+ ]
+ }
netcfg = self.datasource.network_config
self.assertEqual(netcfg, resp)
@@ -441,12 +456,16 @@ class TestDataSourceScaleway(HttprettyTestCase):
self.datasource.metadata['ipv6'] = None
self.datasource._network_config = None
- resp = {'version': 1,
- 'config': [{
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]}]
+ resp = {
+ 'version': 1,
+ 'config': [
+ {
+ 'type': 'physical',
+ 'name': 'ens2',
+ 'subnets': [{'type': 'dhcp4'}]
}
+ ]
+ }
netcfg = self.datasource.network_config
self.assertEqual(netcfg, resp)
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index f1ab1d7a..5847a384 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -32,8 +32,8 @@ from cloudinit.sources.DataSourceSmartOS import (
from cloudinit.event import EventType
from cloudinit import helpers as c_helpers
-from cloudinit.util import (
- b64e, subp, ProcessExecutionError, which, write_file)
+from cloudinit.util import (b64e, write_file)
+from cloudinit.subp import (subp, ProcessExecutionError, which)
from cloudinit.tests.helpers import (
CiTestCase, mock, FilesystemMockingTestCase, skipIf)
@@ -667,7 +667,7 @@ class TestIdentifyFile(CiTestCase):
with self.allow_subp(["file"]):
self.assertEqual("text/plain", identify_file(fname))
- @mock.patch(DSMOS + ".util.subp")
+ @mock.patch(DSMOS + ".subp.subp")
def test_returns_none_on_error(self, m_subp):
"""On 'file' execution error, None should be returned."""
m_subp.side_effect = ProcessExecutionError("FILE_FAILED", exit_code=99)
diff --git a/tests/unittests/test_distros/test_bsd_utils.py b/tests/unittests/test_distros/test_bsd_utils.py
index b38e4af5..3a68f2a9 100644
--- a/tests/unittests/test_distros/test_bsd_utils.py
+++ b/tests/unittests/test_distros/test_bsd_utils.py
@@ -62,5 +62,6 @@ class TestBsdUtils(CiTestCase):
self.load_file.return_value = RC_FILE.format(hostname='foo')
bsd_utils.set_rc_config_value('hostname', 'bar')
self.write_file.assert_called_with(
- '/etc/rc.conf',
- RC_FILE.format(hostname='bar'))
+ '/etc/rc.conf',
+ RC_FILE.format(hostname='bar')
+ )
diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py
index ef11784d..94ab052d 100644
--- a/tests/unittests/test_distros/test_create_users.py
+++ b/tests/unittests/test_distros/test_create_users.py
@@ -46,7 +46,7 @@ class MyBaseDistro(distros.Distro):
@mock.patch("cloudinit.distros.util.system_is_snappy", return_value=False)
-@mock.patch("cloudinit.distros.util.subp")
+@mock.patch("cloudinit.distros.subp.subp")
class TestCreateUser(CiTestCase):
with_logs = True
@@ -240,7 +240,7 @@ class TestCreateUser(CiTestCase):
[mock.call(set(['auth1']), user), # not disabled
mock.call(set(['key1']), 'foouser', options=disable_prefix)])
- @mock.patch("cloudinit.distros.util.which")
+ @mock.patch("cloudinit.distros.subp.which")
def test_lock_with_usermod_if_no_passwd(self, m_which, m_subp,
m_is_snappy):
"""Lock uses usermod --lock if no 'passwd' cmd available."""
@@ -250,7 +250,7 @@ class TestCreateUser(CiTestCase):
[mock.call(['usermod', '--lock', 'bob'])],
m_subp.call_args_list)
- @mock.patch("cloudinit.distros.util.which")
+ @mock.patch("cloudinit.distros.subp.which")
def test_lock_with_passwd_if_available(self, m_which, m_subp,
m_is_snappy):
"""Lock with only passwd will use passwd."""
@@ -260,7 +260,7 @@ class TestCreateUser(CiTestCase):
[mock.call(['passwd', '-l', 'bob'])],
m_subp.call_args_list)
- @mock.patch("cloudinit.distros.util.which")
+ @mock.patch("cloudinit.distros.subp.which")
def test_lock_raises_runtime_if_no_commands(self, m_which, m_subp,
m_is_snappy):
"""Lock with no commands available raises RuntimeError."""
diff --git a/tests/unittests/test_distros/test_debian.py b/tests/unittests/test_distros/test_debian.py
index da16a797..7ff8240b 100644
--- a/tests/unittests/test_distros/test_debian.py
+++ b/tests/unittests/test_distros/test_debian.py
@@ -5,7 +5,7 @@ from cloudinit import util
from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock)
-@mock.patch("cloudinit.distros.debian.util.subp")
+@mock.patch("cloudinit.distros.debian.subp.subp")
class TestDebianApplyLocale(FilesystemMockingTestCase):
def setUp(self):
diff --git a/tests/unittests/test_distros/test_freebsd.py b/tests/unittests/test_distros/test_freebsd.py
index 8af253a2..be565b04 100644
--- a/tests/unittests/test_distros/test_freebsd.py
+++ b/tests/unittests/test_distros/test_freebsd.py
@@ -8,7 +8,7 @@ import os
class TestDeviceLookUp(CiTestCase):
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_find_freebsd_part_label(self, mock_subp):
glabel_out = '''
gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1
@@ -19,7 +19,7 @@ gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1
res = find_freebsd_part("/dev/label/rootfs")
self.assertEqual("da0p2", res)
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_find_freebsd_part_gpt(self, mock_subp):
glabel_out = '''
gpt/bootfs N/A vtbd0p1
diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py
index 02b334e3..44607489 100644
--- a/tests/unittests/test_distros/test_generic.py
+++ b/tests/unittests/test_distros/test_generic.py
@@ -6,6 +6,7 @@ from cloudinit import util
from cloudinit.tests import helpers
import os
+import pytest
import shutil
import tempfile
from unittest import mock
@@ -37,24 +38,6 @@ gapmi = distros._get_arch_package_mirror_info
class TestGenericDistro(helpers.FilesystemMockingTestCase):
- def return_first(self, mlist):
- if not mlist:
- return None
- return mlist[0]
-
- def return_second(self, mlist):
- if not mlist:
- return None
- return mlist[1]
-
- def return_none(self, _mlist):
- return None
-
- def return_last(self, mlist):
- if not mlist:
- return None
- return(mlist[-1])
-
def setUp(self):
super(TestGenericDistro, self).setUp()
# Make a temp directoy for tests to use.
@@ -145,61 +128,6 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
arch_mirrors = gapmi(package_mirrors, arch="amd64")
self.assertEqual(package_mirrors[0], arch_mirrors)
- def test_get_package_mirror_info_az_ec2(self):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- data_source_mock = mock.Mock(availability_zone="us-east-1a")
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_first)
- self.assertEqual(results,
- {'primary': 'http://us-east-1.ec2/',
- 'security': 'http://security-mirror1-intel'})
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_second)
- self.assertEqual(results,
- {'primary': 'http://us-east-1a.clouds/',
- 'security': 'http://security-mirror2-intel'})
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_none)
- self.assertEqual(results, package_mirrors[0]['failsafe'])
-
- def test_get_package_mirror_info_az_non_ec2(self):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- data_source_mock = mock.Mock(availability_zone="nova.cloudvendor")
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_first)
- self.assertEqual(results,
- {'primary': 'http://nova.cloudvendor.clouds/',
- 'security': 'http://security-mirror1-intel'})
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_last)
- self.assertEqual(results,
- {'primary': 'http://nova.cloudvendor.clouds/',
- 'security': 'http://security-mirror2-intel'})
-
- def test_get_package_mirror_info_none(self):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- data_source_mock = mock.Mock(availability_zone=None)
-
- # because both search entries here replacement based on
- # availability-zone, the filter will be called with an empty list and
- # failsafe should be taken.
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_first)
- self.assertEqual(results,
- {'primary': 'http://fs-primary-intel',
- 'security': 'http://security-mirror1-intel'})
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_last)
- self.assertEqual(results,
- {'primary': 'http://fs-primary-intel',
- 'security': 'http://security-mirror2-intel'})
-
def test_systemd_in_use(self):
cls = distros.fetch("ubuntu")
d = cls("ubuntu", {}, None)
@@ -245,7 +173,7 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
for d_name in ("ubuntu", "rhel"):
cls = distros.fetch(d_name)
d = cls(d_name, {}, None)
- with mock.patch("cloudinit.util.subp") as m_subp:
+ with mock.patch("cloudinit.subp.subp") as m_subp:
d.expire_passwd("myuser")
m_subp.assert_called_once_with(["passwd", "--expire", "myuser"])
@@ -253,10 +181,122 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
"""Test FreeBSD.expire_passwd uses the pw command."""
cls = distros.fetch("freebsd")
d = cls("freebsd", {}, None)
- with mock.patch("cloudinit.util.subp") as m_subp:
+ with mock.patch("cloudinit.subp.subp") as m_subp:
d.expire_passwd("myuser")
m_subp.assert_called_once_with(
["pw", "usermod", "myuser", "-p", "01-Jan-1970"])
+class TestGetPackageMirrors:
+
+ def return_first(self, mlist):
+ if not mlist:
+ return None
+ return mlist[0]
+
+ def return_second(self, mlist):
+ if not mlist:
+ return None
+
+ return mlist[1] if len(mlist) > 1 else None
+
+ def return_none(self, _mlist):
+ return None
+
+ def return_last(self, mlist):
+ if not mlist:
+ return None
+ return(mlist[-1])
+
+ @pytest.mark.parametrize(
+ "allow_ec2_mirror, platform_type, mirrors",
+ [
+ (True, "ec2", [
+ {'primary': 'http://us-east-1.ec2/',
+ 'security': 'http://security-mirror1-intel'},
+ {'primary': 'http://us-east-1a.clouds/',
+ 'security': 'http://security-mirror2-intel'}
+ ]),
+ (True, "other", [
+ {'primary': 'http://us-east-1.ec2/',
+ 'security': 'http://security-mirror1-intel'},
+ {'primary': 'http://us-east-1a.clouds/',
+ 'security': 'http://security-mirror2-intel'}
+ ]),
+ (False, "ec2", [
+ {'primary': 'http://us-east-1.ec2/',
+ 'security': 'http://security-mirror1-intel'},
+ {'primary': 'http://us-east-1a.clouds/',
+ 'security': 'http://security-mirror2-intel'}
+ ]),
+ (False, "other", [
+ {'primary': 'http://us-east-1a.clouds/',
+ 'security': 'http://security-mirror1-intel'},
+ {'primary': 'http://fs-primary-intel',
+ 'security': 'http://security-mirror2-intel'}
+ ])
+ ])
+ def test_get_package_mirror_info_az_ec2(self,
+ allow_ec2_mirror,
+ platform_type,
+ mirrors):
+ flag_path = "cloudinit.distros." \
+ "ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES"
+ with mock.patch(flag_path, allow_ec2_mirror):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+ data_source_mock = mock.Mock(
+ availability_zone="us-east-1a",
+ platform_type=platform_type)
+
+ results = gpmi(arch_mirrors, data_source=data_source_mock,
+ mirror_filter=self.return_first)
+ assert(results == mirrors[0])
+
+ results = gpmi(arch_mirrors, data_source=data_source_mock,
+ mirror_filter=self.return_second)
+ assert(results == mirrors[1])
+
+ results = gpmi(arch_mirrors, data_source=data_source_mock,
+ mirror_filter=self.return_none)
+ assert(results == package_mirrors[0]['failsafe'])
+
+ def test_get_package_mirror_info_az_non_ec2(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+ data_source_mock = mock.Mock(availability_zone="nova.cloudvendor")
+
+ results = gpmi(arch_mirrors, data_source=data_source_mock,
+ mirror_filter=self.return_first)
+ assert(results == {
+ 'primary': 'http://nova.cloudvendor.clouds/',
+ 'security': 'http://security-mirror1-intel'}
+ )
+
+ results = gpmi(arch_mirrors, data_source=data_source_mock,
+ mirror_filter=self.return_last)
+ assert(results == {
+ 'primary': 'http://nova.cloudvendor.clouds/',
+ 'security': 'http://security-mirror2-intel'}
+ )
+
+ def test_get_package_mirror_info_none(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+ data_source_mock = mock.Mock(availability_zone=None)
+
+ # because both search entries here replacement based on
+ # availability-zone, the filter will be called with an empty list and
+ # failsafe should be taken.
+ results = gpmi(arch_mirrors, data_source=data_source_mock,
+ mirror_filter=self.return_first)
+ assert(results == {
+ 'primary': 'http://fs-primary-intel',
+ 'security': 'http://security-mirror1-intel'}
+ )
+
+ results = gpmi(arch_mirrors, data_source=data_source_mock,
+ mirror_filter=self.return_last)
+ assert(results == {
+ 'primary': 'http://fs-primary-intel',
+ 'security': 'http://security-mirror2-intel'}
+ )
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index ccf66161..8d7b09c8 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -12,6 +12,7 @@ from cloudinit import helpers
from cloudinit import settings
from cloudinit.tests.helpers import (
FilesystemMockingTestCase, dir2dict)
+from cloudinit import subp
from cloudinit import util
@@ -532,7 +533,7 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
NETWORKING_IPV6=yes
IPV6_AUTOCONF=no
"""),
- }
+ }
# rh_distro.apply_network_config(V1_NET_CFG_IPV6, False)
self._apply_and_verify(self.distro.apply_network_config,
V1_NET_CFG_IPV6,
@@ -656,7 +657,7 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase):
IP=dhcp
Interface=eth1
"""),
- }
+ }
# ub_distro.apply_network_config(V1_NET_CFG, False)
self._apply_and_verify(self.distro.apply_network_config,
@@ -688,6 +689,6 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase):
def get_mode(path, target=None):
- return os.stat(util.target_path(target, path)).st_mode & 0o777
+ return os.stat(subp.target_path(target, path)).st_mode & 0o777
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py
index a6faf0ef..fa48410a 100644
--- a/tests/unittests/test_distros/test_user_data_normalize.py
+++ b/tests/unittests/test_distros/test_user_data_normalize.py
@@ -307,7 +307,7 @@ class TestUGNormalize(TestCase):
self.assertEqual({'default': False}, users['joe'])
self.assertEqual({'default': False}, users['bob'])
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_create_snap_user(self, mock_subp):
mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n',
'')]
@@ -326,7 +326,7 @@ class TestUGNormalize(TestCase):
mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd)
self.assertEqual(username, 'joe')
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_create_snap_user_known(self, mock_subp):
mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n',
'')]
@@ -348,7 +348,7 @@ class TestUGNormalize(TestCase):
@mock.patch('cloudinit.util.system_is_snappy')
@mock.patch('cloudinit.util.is_group')
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_add_user_on_snappy_system(self, mock_subp, mock_isgrp,
mock_snappy):
mock_isgrp.return_value = False
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index c2318570..cb57f2d0 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -6,6 +6,7 @@ import os
from uuid import uuid4
from cloudinit import safeyaml
+from cloudinit import subp
from cloudinit import util
from cloudinit.tests.helpers import (
CiTestCase, dir2dict, populate_dir, populate_dir_with_ts)
@@ -160,8 +161,8 @@ class DsIdentifyBase(CiTestCase):
rc = 0
try:
- out, err = util.subp(['sh', '-c', '. %s' % wrap], capture=True)
- except util.ProcessExecutionError as e:
+ out, err = subp.subp(['sh', '-c', '. %s' % wrap], capture=True)
+ except subp.ProcessExecutionError as e:
rc = e.exit_code
out = e.stdout
err = e.stderr
@@ -272,6 +273,10 @@ class TestDsIdentify(DsIdentifyBase):
"""Rbx datasource has a disk with LABEL=CLOUDMD."""
self._test_ds_found('RbxCloud')
+ def test_rbx_cloud_lower(self):
+ """Rbx datasource has a disk with LABEL=cloudmd."""
+ self._test_ds_found('RbxCloudLower')
+
def test_config_drive_upper(self):
"""ConfigDrive datasource has a disk with LABEL=CONFIG-2."""
self._test_ds_found('ConfigDriveUpper')
@@ -947,6 +952,18 @@ VALID_CFG = {
)},
],
},
+ 'RbxCloudLower': {
+ 'ds': 'RbxCloud',
+ 'mocks': [
+ {'name': 'blkid', 'ret': 0,
+ 'out': blkid_out(
+ [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
+ {'DEVNAME': 'vda2', 'TYPE': 'ext4',
+ 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()},
+ {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'cloudmd'}]
+ )},
+ ],
+ },
'Hetzner': {
'ds': 'Hetzner',
'files': {P_SYS_VENDOR: 'Hetzner\n'},
@@ -1040,11 +1057,11 @@ VALID_CFG = {
'Ec2-E24Cloud': {
'ds': 'Ec2',
'files': {P_SYS_VENDOR: 'e24cloud\n'},
- },
+ },
'Ec2-E24Cloud-negative': {
'ds': 'Ec2',
'files': {P_SYS_VENDOR: 'e24cloudyday\n'},
- }
+ }
}
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
index 69009a44..369480be 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
@@ -13,6 +13,7 @@ from cloudinit import cloud
from cloudinit import distros
from cloudinit import helpers
from cloudinit import templater
+from cloudinit import subp
from cloudinit import util
from cloudinit.config import cc_apt_configure
@@ -66,7 +67,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
"""
def setUp(self):
super(TestAptSourceConfigSourceList, self).setUp()
- self.subp = util.subp
+ self.subp = subp.subp
self.new_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.new_root)
@@ -100,6 +101,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
cfg = {'apt_mirror_search': mirror}
else:
cfg = {'apt_mirror': mirror}
+
mycloud = self._get_cloud(distro)
with mock.patch.object(util, 'write_file') as mockwf:
@@ -107,8 +109,9 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
return_value="faketmpl") as mocklf:
with mock.patch.object(os.path, 'isfile',
return_value=True) as mockisfile:
- with mock.patch.object(templater, 'render_string',
- return_value="fake") as mockrnd:
+ with mock.patch.object(
+ templater, 'render_string',
+ return_value='fake') as mockrnd:
with mock.patch.object(util, 'rename'):
cc_apt_configure.handle("test", cfg, mycloud,
LOG, None)
@@ -176,7 +179,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
# the second mock restores the original subp
with mock.patch.object(util, 'write_file') as mockwrite:
- with mock.patch.object(util, 'subp', self.subp):
+ with mock.patch.object(subp, 'subp', self.subp):
with mock.patch.object(Distro, 'get_primary_arch',
return_value='amd64'):
cc_apt_configure.handle("notimportant", cfg, mycloud,
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
index 0aa3d51a..b96fd4d4 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
@@ -13,6 +13,7 @@ from unittest.mock import call
from cloudinit import cloud
from cloudinit import distros
from cloudinit import helpers
+from cloudinit import subp
from cloudinit import util
from cloudinit.config import cc_apt_configure
@@ -94,7 +95,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
"""TestAptSourceConfigSourceList - Class to test sources list rendering"""
def setUp(self):
super(TestAptSourceConfigSourceList, self).setUp()
- self.subp = util.subp
+ self.subp = subp.subp
self.new_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.new_root)
@@ -222,7 +223,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
# the second mock restores the original subp
with mock.patch.object(util, 'write_file') as mockwrite:
- with mock.patch.object(util, 'subp', self.subp):
+ with mock.patch.object(subp, 'subp', self.subp):
with mock.patch.object(Distro, 'get_primary_arch',
return_value='amd64'):
cc_apt_configure.handle("notimportant", cfg, mycloud,
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v1.py b/tests/unittests/test_handler/test_handler_apt_source_v1.py
index 866752ef..367971cb 100644
--- a/tests/unittests/test_handler/test_handler_apt_source_v1.py
+++ b/tests/unittests/test_handler/test_handler_apt_source_v1.py
@@ -14,6 +14,7 @@ from unittest.mock import call
from cloudinit.config import cc_apt_configure
from cloudinit import gpg
+from cloudinit import subp
from cloudinit import util
from cloudinit.tests.helpers import TestCase
@@ -42,10 +43,17 @@ class FakeDistro(object):
return
+class FakeDatasource:
+ """Fake Datasource helper object"""
+ def __init__(self):
+ self.region = 'region'
+
+
class FakeCloud(object):
"""Fake Cloud helper object"""
def __init__(self):
self.distro = FakeDistro()
+ self.datasource = FakeDatasource()
class TestAptSourceConfig(TestCase):
@@ -271,7 +279,7 @@ class TestAptSourceConfig(TestCase):
"""
cfg = self.wrapv1conf(cfg)
- with mock.patch.object(util, 'subp',
+ with mock.patch.object(subp, 'subp',
return_value=('fakekey 1234', '')) as mockobj:
cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
@@ -356,7 +364,7 @@ class TestAptSourceConfig(TestCase):
"""
cfg = self.wrapv1conf([cfg])
- with mock.patch.object(util, 'subp') as mockobj:
+ with mock.patch.object(subp, 'subp') as mockobj:
cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
mockobj.assert_called_with(['apt-key', 'add', '-'],
@@ -398,7 +406,7 @@ class TestAptSourceConfig(TestCase):
'filename': self.aptlistfile}
cfg = self.wrapv1conf([cfg])
- with mock.patch.object(util, 'subp') as mockobj:
+ with mock.patch.object(subp, 'subp') as mockobj:
cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
mockobj.assert_called_once_with(['apt-key', 'add', '-'],
@@ -413,7 +421,7 @@ class TestAptSourceConfig(TestCase):
'filename': self.aptlistfile}
cfg = self.wrapv1conf([cfg])
- with mock.patch.object(util, 'subp',
+ with mock.patch.object(subp, 'subp',
return_value=('fakekey 1212', '')) as mockobj:
cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
@@ -476,7 +484,7 @@ class TestAptSourceConfig(TestCase):
'filename': self.aptlistfile}
cfg = self.wrapv1conf([cfg])
- with mock.patch.object(util, 'subp') as mockobj:
+ with mock.patch.object(subp, 'subp') as mockobj:
cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
mockobj.assert_called_once_with(['add-apt-repository',
'ppa:smoser/cloud-init-test'],
@@ -495,7 +503,7 @@ class TestAptSourceConfig(TestCase):
'filename': self.aptlistfile3}
cfg = self.wrapv1conf([cfg1, cfg2, cfg3])
- with mock.patch.object(util, 'subp') as mockobj:
+ with mock.patch.object(subp, 'subp') as mockobj:
cc_apt_configure.handle("test", cfg, self.fakecloud,
None, None)
calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'],
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py
index aefe26c4..ac847238 100644
--- a/tests/unittests/test_handler/test_handler_apt_source_v3.py
+++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py
@@ -18,6 +18,7 @@ from cloudinit import cloud
from cloudinit import distros
from cloudinit import gpg
from cloudinit import helpers
+from cloudinit import subp
from cloudinit import util
from cloudinit.config import cc_apt_configure
@@ -48,6 +49,18 @@ MOCK_LSB_RELEASE_DATA = {
'release': '18.04', 'codename': 'bionic'}
+class FakeDatasource:
+ """Fake Datasource helper object"""
+ def __init__(self):
+ self.region = 'region'
+
+
+class FakeCloud:
+ """Fake Cloud helper object"""
+ def __init__(self):
+ self.datasource = FakeDatasource()
+
+
class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
"""TestAptSourceConfig
Main Class to test apt configs
@@ -221,7 +234,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
"""
params = self._get_default_params()
- with mock.patch("cloudinit.util.subp",
+ with mock.patch("cloudinit.subp.subp",
return_value=('fakekey 1234', '')) as mockobj:
self._add_apt_sources(cfg, TARGET, template_params=params,
aa_repo_match=self.matcher)
@@ -296,7 +309,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
' xenial main'),
'key': "fakekey 4321"}}
- with mock.patch.object(util, 'subp') as mockobj:
+ with mock.patch.object(subp, 'subp') as mockobj:
self._add_apt_sources(cfg, TARGET, template_params=params,
aa_repo_match=self.matcher)
@@ -318,7 +331,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
params = self._get_default_params()
cfg = {self.aptlistfile: {'key': "fakekey 4242"}}
- with mock.patch.object(util, 'subp') as mockobj:
+ with mock.patch.object(subp, 'subp') as mockobj:
self._add_apt_sources(cfg, TARGET, template_params=params,
aa_repo_match=self.matcher)
@@ -333,7 +346,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
params = self._get_default_params()
cfg = {self.aptlistfile: {'keyid': "03683F77"}}
- with mock.patch.object(util, 'subp',
+ with mock.patch.object(subp, 'subp',
return_value=('fakekey 1212', '')) as mockobj:
self._add_apt_sources(cfg, TARGET, template_params=params,
aa_repo_match=self.matcher)
@@ -416,7 +429,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
params = self._get_default_params()
cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'}}
- with mock.patch("cloudinit.util.subp") as mockobj:
+ with mock.patch("cloudinit.subp.subp") as mockobj:
self._add_apt_sources(cfg, TARGET, template_params=params,
aa_repo_match=self.matcher)
mockobj.assert_any_call(['add-apt-repository',
@@ -432,7 +445,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
self.aptlistfile2: {'source': 'ppa:smoser/cloud-init-test2'},
self.aptlistfile3: {'source': 'ppa:smoser/cloud-init-test3'}}
- with mock.patch("cloudinit.util.subp") as mockobj:
+ with mock.patch("cloudinit.subp.subp") as mockobj:
self._add_apt_sources(cfg, TARGET, template_params=params,
aa_repo_match=self.matcher)
calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'],
@@ -470,7 +483,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
fromfn = ("%s/%s_%s" % (pre, archive, post))
tofn = ("%s/test.ubuntu.com_%s" % (pre, post))
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None, arch)
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch)
self.assertEqual(mirrors['MIRROR'],
"http://test.ubuntu.com/%s/" % component)
@@ -558,7 +571,8 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
"security": [{'arches': ["default"],
"uri": smir}]}
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None, 'amd64')
+ mirrors = cc_apt_configure.find_apt_mirror_info(
+ cfg, FakeCloud(), 'amd64')
self.assertEqual(mirrors['MIRROR'],
pmir)
@@ -593,7 +607,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
"security": [{'arches': ["default"], "uri": "nothis-security"},
{'arches': [arch], "uri": smir}]}
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None, arch)
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch)
self.assertEqual(mirrors['PRIMARY'], pmir)
self.assertEqual(mirrors['MIRROR'], pmir)
@@ -612,7 +626,8 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
{'arches': ["default"],
"uri": smir}]}
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None, 'amd64')
+ mirrors = cc_apt_configure.find_apt_mirror_info(
+ cfg, FakeCloud(), 'amd64')
self.assertEqual(mirrors['MIRROR'],
pmir)
@@ -672,7 +687,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
with mock.patch.object(cc_apt_configure.util, 'search_for_mirror',
side_effect=[pmir, smir]) as mocksearch:
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None,
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(),
'amd64')
calls = [call(["pfailme", pmir]),
@@ -711,7 +726,8 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
# should not be called, since primary is specified
with mock.patch.object(cc_apt_configure.util,
'search_for_mirror') as mockse:
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None, arch)
+ mirrors = cc_apt_configure.find_apt_mirror_info(
+ cfg, FakeCloud(), arch)
mockse.assert_not_called()
self.assertEqual(mirrors['MIRROR'],
@@ -996,7 +1012,7 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
class TestDebconfSelections(TestCase):
- @mock.patch("cloudinit.config.cc_apt_configure.util.subp")
+ @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
def test_set_sel_appends_newline_if_absent(self, m_subp):
"""Automatically append a newline to debconf-set-selections config."""
selections = b'some/setting boolean true'
@@ -1081,7 +1097,7 @@ class TestDebconfSelections(TestCase):
self.assertTrue(m_get_inst.called)
self.assertEqual(m_dpkg_r.call_count, 0)
- @mock.patch("cloudinit.config.cc_apt_configure.util.subp")
+ @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
def test_dpkg_reconfigure_does_reconfigure(self, m_subp):
target = "/foo-target"
@@ -1104,12 +1120,12 @@ class TestDebconfSelections(TestCase):
'cloud-init']
self.assertEqual(expected, found)
- @mock.patch("cloudinit.config.cc_apt_configure.util.subp")
+ @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
def test_dpkg_reconfigure_not_done_on_no_data(self, m_subp):
cc_apt_configure.dpkg_reconfigure([])
m_subp.assert_not_called()
- @mock.patch("cloudinit.config.cc_apt_configure.util.subp")
+ @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
def test_dpkg_reconfigure_not_done_if_no_cleaners(self, m_subp):
cc_apt_configure.dpkg_reconfigure(['pkgfoo', 'pkgbar'])
m_subp.assert_not_called()
diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/test_handler/test_handler_bootcmd.py
index a76760fa..b53d60d4 100644
--- a/tests/unittests/test_handler/test_handler_bootcmd.py
+++ b/tests/unittests/test_handler/test_handler_bootcmd.py
@@ -2,7 +2,7 @@
from cloudinit.config.cc_bootcmd import handle, schema
from cloudinit.sources import DataSourceNone
-from cloudinit import (distros, helpers, cloud, util)
+from cloudinit import (distros, helpers, cloud, subp, util)
from cloudinit.tests.helpers import (
CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
@@ -36,7 +36,7 @@ class TestBootcmd(CiTestCase):
def setUp(self):
super(TestBootcmd, self).setUp()
- self.subp = util.subp
+ self.subp = subp.subp
self.new_root = self.tmp_dir()
def _get_cloud(self, distro):
@@ -130,7 +130,7 @@ class TestBootcmd(CiTestCase):
with mock.patch(self._etmpfile_path, FakeExtendedTempFile):
with self.allow_subp(['/bin/sh']):
- with self.assertRaises(util.ProcessExecutionError) as ctxt:
+ with self.assertRaises(subp.ProcessExecutionError) as ctxt:
handle('does-not-matter', valid_config, cc, LOG, [])
self.assertIn(
'Unexpected error while running command.\n'
diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py
index 286ef771..db0cdf9b 100644
--- a/tests/unittests/test_handler/test_handler_ca_certs.py
+++ b/tests/unittests/test_handler/test_handler_ca_certs.py
@@ -3,6 +3,7 @@
from cloudinit import cloud
from cloudinit.config import cc_ca_certs
from cloudinit import helpers
+from cloudinit import subp
from cloudinit import util
from cloudinit.tests.helpers import TestCase
@@ -228,7 +229,7 @@ class TestAddCaCerts(TestCase):
class TestUpdateCaCerts(unittest.TestCase):
def test_commands(self):
- with mock.patch.object(util, 'subp') as mockobj:
+ with mock.patch.object(subp, 'subp') as mockobj:
cc_ca_certs.update_ca_certs()
mockobj.assert_called_once_with(
["update-ca-certificates"], capture=False)
@@ -250,7 +251,7 @@ class TestRemoveDefaultCaCerts(TestCase):
mock.patch.object(util, 'delete_dir_contents'))
mock_write = mocks.enter_context(
mock.patch.object(util, 'write_file'))
- mock_subp = mocks.enter_context(mock.patch.object(util, 'subp'))
+ mock_subp = mocks.enter_context(mock.patch.object(subp, 'subp'))
cc_ca_certs.remove_default_ca_certs()
diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py
index 8c476418..7918c609 100644
--- a/tests/unittests/test_handler/test_handler_chef.py
+++ b/tests/unittests/test_handler/test_handler_chef.py
@@ -41,7 +41,7 @@ class TestInstallChefOmnibus(HttprettyTestCase):
httpretty.GET, cc_chef.OMNIBUS_URL, body=response, status=200)
ret = (None, None) # stdout, stderr but capture=False
- with mock.patch("cloudinit.config.cc_chef.util.subp_blob_in_tempfile",
+ with mock.patch("cloudinit.config.cc_chef.subp_blob_in_tempfile",
return_value=ret) as m_subp_blob:
cc_chef.install_chef_from_omnibus()
# admittedly whitebox, but assuming subp_blob_in_tempfile works
@@ -52,7 +52,7 @@ class TestInstallChefOmnibus(HttprettyTestCase):
m_subp_blob.call_args_list)
@mock.patch('cloudinit.config.cc_chef.url_helper.readurl')
- @mock.patch('cloudinit.config.cc_chef.util.subp_blob_in_tempfile')
+ @mock.patch('cloudinit.config.cc_chef.subp_blob_in_tempfile')
def test_install_chef_from_omnibus_retries_url(self, m_subp_blob, m_rdurl):
"""install_chef_from_omnibus retries OMNIBUS_URL upon failure."""
@@ -81,7 +81,7 @@ class TestInstallChefOmnibus(HttprettyTestCase):
m_subp_blob.call_args_list[0][1])
@mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP)
- @mock.patch('cloudinit.config.cc_chef.util.subp_blob_in_tempfile')
+ @mock.patch('cloudinit.config.cc_chef.subp_blob_in_tempfile')
def test_install_chef_from_omnibus_has_omnibus_version(self, m_subp_blob):
"""install_chef_from_omnibus provides version arg to OMNIBUS_URL."""
chef_outfile = self.tmp_path('chef.out', self.new_root)
diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py
index 0e51f17a..4f4a57fa 100644
--- a/tests/unittests/test_handler/test_handler_disk_setup.py
+++ b/tests/unittests/test_handler/test_handler_disk_setup.py
@@ -44,7 +44,7 @@ class TestGetMbrHddSize(TestCase):
super(TestGetMbrHddSize, self).setUp()
self.patches = ExitStack()
self.subp = self.patches.enter_context(
- mock.patch.object(cc_disk_setup.util, 'subp'))
+ mock.patch.object(cc_disk_setup.subp, 'subp'))
def tearDown(self):
super(TestGetMbrHddSize, self).tearDown()
@@ -173,7 +173,7 @@ class TestUpdateFsSetupDevices(TestCase):
@mock.patch('cloudinit.config.cc_disk_setup.find_device_node',
return_value=('/dev/xdb1', False))
@mock.patch('cloudinit.config.cc_disk_setup.device_type', return_value=None)
-@mock.patch('cloudinit.config.cc_disk_setup.util.subp', return_value=('', ''))
+@mock.patch('cloudinit.config.cc_disk_setup.subp.subp', return_value=('', ''))
class TestMkfsCommandHandling(CiTestCase):
with_logs = True
@@ -204,7 +204,7 @@ class TestMkfsCommandHandling(CiTestCase):
subp.assert_called_once_with(
'mkfs -t ext4 -L with_cmd /dev/xdb1', shell=True)
- @mock.patch('cloudinit.config.cc_disk_setup.util.which')
+ @mock.patch('cloudinit.config.cc_disk_setup.subp.which')
def test_overwrite_and_extra_opts_without_cmd(self, m_which, subp, *args):
"""mkfs observes extra_opts and overwrite settings when cmd is not
present."""
@@ -222,7 +222,7 @@ class TestMkfsCommandHandling(CiTestCase):
'-L', 'without_cmd', '-F', 'are', 'added'],
shell=False)
- @mock.patch('cloudinit.config.cc_disk_setup.util.which')
+ @mock.patch('cloudinit.config.cc_disk_setup.subp.which')
def test_mkswap(self, m_which, subp, *args):
"""mkfs observes extra_opts and overwrite settings when cmd is not
present."""
diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py
index 501bcca5..7f039b79 100644
--- a/tests/unittests/test_handler/test_handler_growpart.py
+++ b/tests/unittests/test_handler/test_handler_growpart.py
@@ -2,7 +2,7 @@
from cloudinit import cloud
from cloudinit.config import cc_growpart
-from cloudinit import util
+from cloudinit import subp
from cloudinit.tests.helpers import TestCase
@@ -95,7 +95,7 @@ class TestConfig(TestCase):
@mock.patch.dict("os.environ", clear=True)
def test_no_resizers_auto_is_fine(self):
with mock.patch.object(
- util, 'subp',
+ subp, 'subp',
return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj:
config = {'growpart': {'mode': 'auto'}}
@@ -109,7 +109,7 @@ class TestConfig(TestCase):
@mock.patch.dict("os.environ", clear=True)
def test_no_resizers_mode_growpart_is_exception(self):
with mock.patch.object(
- util, 'subp',
+ subp, 'subp',
return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj:
config = {'growpart': {'mode': "growpart"}}
self.assertRaises(
@@ -122,7 +122,7 @@ class TestConfig(TestCase):
@mock.patch.dict("os.environ", clear=True)
def test_mode_auto_prefers_growpart(self):
with mock.patch.object(
- util, 'subp',
+ subp, 'subp',
return_value=(HELP_GROWPART_RESIZE, "")) as mockobj:
ret = cc_growpart.resizer_factory(mode="auto")
self.assertIsInstance(ret, cc_growpart.ResizeGrowPart)
@@ -133,7 +133,7 @@ class TestConfig(TestCase):
@mock.patch.dict("os.environ", clear=True)
def test_mode_auto_falls_back_to_gpart(self):
with mock.patch.object(
- util, 'subp',
+ subp, 'subp',
return_value=("", HELP_GPART)) as mockobj:
ret = cc_growpart.resizer_factory(mode="auto")
self.assertIsInstance(ret, cc_growpart.ResizeGpart)
diff --git a/tests/unittests/test_handler/test_handler_landscape.py b/tests/unittests/test_handler/test_handler_landscape.py
index db92a7e2..7d165687 100644
--- a/tests/unittests/test_handler/test_handler_landscape.py
+++ b/tests/unittests/test_handler/test_handler_landscape.py
@@ -49,8 +49,8 @@ class TestLandscape(FilesystemMockingTestCase):
"'landscape' key existed in config, but not a dict",
str(context_manager.exception))
- @mock.patch('cloudinit.config.cc_landscape.util')
- def test_handler_restarts_landscape_client(self, m_util):
+ @mock.patch('cloudinit.config.cc_landscape.subp')
+ def test_handler_restarts_landscape_client(self, m_subp):
"""handler restarts lansdscape-client after install."""
mycloud = self._get_cloud('ubuntu')
cfg = {'landscape': {'client': {}}}
@@ -60,7 +60,7 @@ class TestLandscape(FilesystemMockingTestCase):
cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None)
self.assertEqual(
[mock.call(['service', 'landscape-client', 'restart'])],
- m_util.subp.call_args_list)
+ m_subp.subp.call_args_list)
def test_handler_installs_client_and_creates_config_file(self):
"""Write landscape client.conf and install landscape-client."""
diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py
index 407aa6c4..47e7d804 100644
--- a/tests/unittests/test_handler/test_handler_locale.py
+++ b/tests/unittests/test_handler/test_handler_locale.py
@@ -84,7 +84,7 @@ class TestLocale(t_help.FilesystemMockingTestCase):
util.write_file(locale_conf, 'LANG="en_US.UTF-8"\n')
cfg = {'locale': 'C.UTF-8'}
cc = self._get_cloud('ubuntu')
- with mock.patch('cloudinit.distros.debian.util.subp') as m_subp:
+ with mock.patch('cloudinit.distros.debian.subp.subp') as m_subp:
with mock.patch('cloudinit.distros.debian.LOCALE_CONF_FN',
locale_conf):
cc_locale.handle('cc_locale', cfg, cc, LOG, [])
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
index 40b521e5..21011204 100644
--- a/tests/unittests/test_handler/test_handler_lxd.py
+++ b/tests/unittests/test_handler/test_handler_lxd.py
@@ -31,13 +31,13 @@ class TestLxd(t_help.CiTestCase):
return cc
@mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
- @mock.patch("cloudinit.config.cc_lxd.util")
- def test_lxd_init(self, mock_util, m_maybe_clean):
+ @mock.patch("cloudinit.config.cc_lxd.subp")
+ def test_lxd_init(self, mock_subp, m_maybe_clean):
cc = self._get_cloud('ubuntu')
- mock_util.which.return_value = True
+ mock_subp.which.return_value = True
m_maybe_clean.return_value = None
cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
- self.assertTrue(mock_util.which.called)
+ self.assertTrue(mock_subp.which.called)
# no bridge config, so maybe_cleanup should not be called.
self.assertFalse(m_maybe_clean.called)
self.assertEqual(
@@ -45,14 +45,14 @@ class TestLxd(t_help.CiTestCase):
mock.call(
['lxd', 'init', '--auto', '--network-address=0.0.0.0',
'--storage-backend=zfs', '--storage-pool=poolname'])],
- mock_util.subp.call_args_list)
+ mock_subp.subp.call_args_list)
@mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
- @mock.patch("cloudinit.config.cc_lxd.util")
- def test_lxd_install(self, mock_util, m_maybe_clean):
+ @mock.patch("cloudinit.config.cc_lxd.subp")
+ def test_lxd_install(self, mock_subp, m_maybe_clean):
cc = self._get_cloud('ubuntu')
cc.distro = mock.MagicMock()
- mock_util.which.return_value = None
+ mock_subp.which.return_value = None
cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
self.assertNotIn('WARN', self.logs.getvalue())
self.assertTrue(cc.distro.install_packages.called)
@@ -62,23 +62,23 @@ class TestLxd(t_help.CiTestCase):
self.assertEqual(sorted(install_pkg), ['lxd', 'zfsutils-linux'])
@mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
- @mock.patch("cloudinit.config.cc_lxd.util")
- def test_no_init_does_nothing(self, mock_util, m_maybe_clean):
+ @mock.patch("cloudinit.config.cc_lxd.subp")
+ def test_no_init_does_nothing(self, mock_subp, m_maybe_clean):
cc = self._get_cloud('ubuntu')
cc.distro = mock.MagicMock()
cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, self.logger, [])
self.assertFalse(cc.distro.install_packages.called)
- self.assertFalse(mock_util.subp.called)
+ self.assertFalse(mock_subp.subp.called)
self.assertFalse(m_maybe_clean.called)
@mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
- @mock.patch("cloudinit.config.cc_lxd.util")
- def test_no_lxd_does_nothing(self, mock_util, m_maybe_clean):
+ @mock.patch("cloudinit.config.cc_lxd.subp")
+ def test_no_lxd_does_nothing(self, mock_subp, m_maybe_clean):
cc = self._get_cloud('ubuntu')
cc.distro = mock.MagicMock()
cc_lxd.handle('cc_lxd', {'package_update': True}, cc, self.logger, [])
self.assertFalse(cc.distro.install_packages.called)
- self.assertFalse(mock_util.subp.called)
+ self.assertFalse(mock_subp.subp.called)
self.assertFalse(m_maybe_clean.called)
def test_lxd_debconf_new_full(self):
diff --git a/tests/unittests/test_handler/test_handler_mcollective.py b/tests/unittests/test_handler/test_handler_mcollective.py
index c013a538..6891e15f 100644
--- a/tests/unittests/test_handler/test_handler_mcollective.py
+++ b/tests/unittests/test_handler/test_handler_mcollective.py
@@ -136,8 +136,9 @@ class TestHandler(t_help.TestCase):
cc = cloud.Cloud(ds, paths, {}, d, None)
return cc
+ @t_help.mock.patch("cloudinit.config.cc_mcollective.subp")
@t_help.mock.patch("cloudinit.config.cc_mcollective.util")
- def test_mcollective_install(self, mock_util):
+ def test_mcollective_install(self, mock_util, mock_subp):
cc = self._get_cloud('ubuntu')
cc.distro = t_help.mock.MagicMock()
mock_util.load_file.return_value = b""
@@ -147,8 +148,8 @@ class TestHandler(t_help.TestCase):
install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
self.assertEqual(install_pkg, ('mcollective',))
- self.assertTrue(mock_util.subp.called)
- self.assertEqual(mock_util.subp.call_args_list[0][0][0],
+ self.assertTrue(mock_subp.subp.called)
+ self.assertEqual(mock_subp.subp.call_args_list[0][0][0],
['service', 'mcollective', 'restart'])
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py
index 35e72bd1..e193f4d4 100644
--- a/tests/unittests/test_handler/test_handler_mounts.py
+++ b/tests/unittests/test_handler/test_handler_mounts.py
@@ -155,8 +155,8 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
'mock_is_block_device',
return_value=True)
- self.add_patch('cloudinit.config.cc_mounts.util.subp',
- 'm_util_subp')
+ self.add_patch('cloudinit.config.cc_mounts.subp.subp',
+ 'm_subp_subp')
self.add_patch('cloudinit.config.cc_mounts.util.mounts',
'mock_util_mounts',
@@ -183,6 +183,18 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
return dev
+ def test_no_fstab(self):
+ """ Handle images which do not include an fstab. """
+ self.assertFalse(os.path.exists(cc_mounts.FSTAB_PATH))
+ fstab_expected_content = (
+ '%s\tnone\tswap\tsw,comment=cloudconfig\t'
+ '0\t0\n' % (self.swap_path,)
+ )
+ cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
+ with open(cc_mounts.FSTAB_PATH, 'r') as fd:
+ fstab_new_content = fd.read()
+ self.assertEqual(fstab_expected_content, fstab_new_content)
+
def test_swap_integrity(self):
'''Ensure that the swap file is correctly created and can
swapon successfully. Fixing the corner case of:
@@ -260,15 +272,18 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
'/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n'
)
fstab_expected_content = fstab_original_content
- cc = {'mounts': [
- ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec']]}
+ cc = {
+ 'mounts': [
+ ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec']
+ ]
+ }
with open(cc_mounts.FSTAB_PATH, 'w') as fd:
fd.write(fstab_original_content)
with open(cc_mounts.FSTAB_PATH, 'r') as fd:
fstab_new_content = fd.read()
self.assertEqual(fstab_expected_content, fstab_new_content)
cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, [])
- self.m_util_subp.assert_has_calls([
+ self.m_subp_subp.assert_has_calls([
mock.call(['mount', '-a']),
mock.call(['systemctl', 'daemon-reload'])])
diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py
index 463d892a..92a33ec1 100644
--- a/tests/unittests/test_handler/test_handler_ntp.py
+++ b/tests/unittests/test_handler/test_handler_ntp.py
@@ -83,50 +83,50 @@ class TestNtp(FilesystemMockingTestCase):
ntpconfig['template_name'] = os.path.basename(confpath)
return ntpconfig
- @mock.patch("cloudinit.config.cc_ntp.util")
- def test_ntp_install(self, mock_util):
+ @mock.patch("cloudinit.config.cc_ntp.subp")
+ def test_ntp_install(self, mock_subp):
"""ntp_install_client runs install_func when check_exe is absent."""
- mock_util.which.return_value = None # check_exe not found.
+ mock_subp.which.return_value = None # check_exe not found.
install_func = mock.MagicMock()
cc_ntp.install_ntp_client(install_func,
packages=['ntpx'], check_exe='ntpdx')
- mock_util.which.assert_called_with('ntpdx')
+ mock_subp.which.assert_called_with('ntpdx')
install_func.assert_called_once_with(['ntpx'])
- @mock.patch("cloudinit.config.cc_ntp.util")
- def test_ntp_install_not_needed(self, mock_util):
+ @mock.patch("cloudinit.config.cc_ntp.subp")
+ def test_ntp_install_not_needed(self, mock_subp):
"""ntp_install_client doesn't install when check_exe is found."""
client = 'chrony'
- mock_util.which.return_value = [client] # check_exe found.
+ mock_subp.which.return_value = [client] # check_exe found.
install_func = mock.MagicMock()
cc_ntp.install_ntp_client(install_func, packages=[client],
check_exe=client)
install_func.assert_not_called()
- @mock.patch("cloudinit.config.cc_ntp.util")
- def test_ntp_install_no_op_with_empty_pkg_list(self, mock_util):
+ @mock.patch("cloudinit.config.cc_ntp.subp")
+ def test_ntp_install_no_op_with_empty_pkg_list(self, mock_subp):
"""ntp_install_client runs install_func with empty list"""
- mock_util.which.return_value = None # check_exe not found
+ mock_subp.which.return_value = None # check_exe not found
install_func = mock.MagicMock()
cc_ntp.install_ntp_client(install_func, packages=[],
check_exe='timesyncd')
install_func.assert_called_once_with([])
- @mock.patch("cloudinit.config.cc_ntp.util")
- def test_reload_ntp_defaults(self, mock_util):
+ @mock.patch("cloudinit.config.cc_ntp.subp")
+ def test_reload_ntp_defaults(self, mock_subp):
"""Test service is restarted/reloaded (defaults)"""
service = 'ntp_service_name'
cmd = ['service', service, 'restart']
cc_ntp.reload_ntp(service)
- mock_util.subp.assert_called_with(cmd, capture=True)
+ mock_subp.subp.assert_called_with(cmd, capture=True)
- @mock.patch("cloudinit.config.cc_ntp.util")
- def test_reload_ntp_systemd(self, mock_util):
+ @mock.patch("cloudinit.config.cc_ntp.subp")
+ def test_reload_ntp_systemd(self, mock_subp):
"""Test service is restarted/reloaded (systemd)"""
service = 'ntp_service_name'
cc_ntp.reload_ntp(service, systemd=True)
cmd = ['systemctl', 'reload-or-restart', service]
- mock_util.subp.assert_called_with(cmd, capture=True)
+ mock_subp.subp.assert_called_with(cmd, capture=True)
def test_ntp_rename_ntp_conf(self):
"""When NTP_CONF exists, rename_ntp moves it."""
@@ -440,9 +440,10 @@ class TestNtp(FilesystemMockingTestCase):
cc_ntp.handle('notimportant', cfg, mycloud, None, None)
self.assertEqual(0, m_select.call_count)
+ @mock.patch("cloudinit.config.cc_ntp.subp")
@mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
@mock.patch("cloudinit.distros.Distro.uses_systemd")
- def test_ntp_the_whole_package(self, m_sysd, m_select):
+ def test_ntp_the_whole_package(self, m_sysd, m_select, m_subp):
"""Test enabled config renders template, and restarts service """
cfg = {'ntp': {'enabled': True}}
for distro in cc_ntp.distros:
@@ -458,12 +459,12 @@ class TestNtp(FilesystemMockingTestCase):
# allow use of util.mergemanydict
m_util.mergemanydict.side_effect = util.mergemanydict
# default client is present
- m_util.which.return_value = True
+ m_subp.which.return_value = True
# use the config 'enabled' value
m_util.is_false.return_value = util.is_false(
cfg['ntp']['enabled'])
cc_ntp.handle('notimportant', cfg, mycloud, None, None)
- m_util.subp.assert_called_with(
+ m_subp.subp.assert_called_with(
['systemctl', 'reload-or-restart',
service_name], capture=True)
self.assertEqual(
@@ -503,7 +504,7 @@ class TestNtp(FilesystemMockingTestCase):
expected_client = mycloud.distro.preferred_ntp_clients[0]
self.assertEqual('ntp', expected_client)
- @mock.patch('cloudinit.config.cc_ntp.util.which')
+ @mock.patch('cloudinit.config.cc_ntp.subp.which')
def test_snappy_system_picks_timesyncd(self, m_which):
"""Test snappy systems prefer installed clients"""
@@ -528,7 +529,7 @@ class TestNtp(FilesystemMockingTestCase):
self.assertEqual(sorted(expected_cfg), sorted(cfg))
self.assertEqual(sorted(expected_cfg), sorted(result))
- @mock.patch('cloudinit.config.cc_ntp.util.which')
+ @mock.patch('cloudinit.config.cc_ntp.subp.which')
def test_ntp_distro_searches_all_preferred_clients(self, m_which):
"""Test select_ntp_client search all distro perferred clients """
# nothing is installed
@@ -546,7 +547,7 @@ class TestNtp(FilesystemMockingTestCase):
m_which.assert_has_calls(expected_calls)
self.assertEqual(sorted(expected_cfg), sorted(cfg))
- @mock.patch('cloudinit.config.cc_ntp.util.which')
+ @mock.patch('cloudinit.config.cc_ntp.subp.which')
def test_user_cfg_ntp_client_auto_uses_distro_clients(self, m_which):
"""Test user_cfg.ntp_client='auto' defaults to distro search"""
# nothing is installed
@@ -566,7 +567,7 @@ class TestNtp(FilesystemMockingTestCase):
@mock.patch('cloudinit.config.cc_ntp.write_ntp_config_template')
@mock.patch('cloudinit.cloud.Cloud.get_template_filename')
- @mock.patch('cloudinit.config.cc_ntp.util.which')
+ @mock.patch('cloudinit.config.cc_ntp.subp.which')
def test_ntp_custom_client_overrides_installed_clients(self, m_which,
m_tmpfn, m_write):
"""Test user client is installed despite other clients present """
@@ -582,7 +583,7 @@ class TestNtp(FilesystemMockingTestCase):
m_install.assert_called_with([client])
m_which.assert_called_with(client)
- @mock.patch('cloudinit.config.cc_ntp.util.which')
+ @mock.patch('cloudinit.config.cc_ntp.subp.which')
def test_ntp_system_config_overrides_distro_builtin_clients(self, m_which):
"""Test distro system_config overrides builtin preferred ntp clients"""
system_client = 'chrony'
@@ -597,7 +598,7 @@ class TestNtp(FilesystemMockingTestCase):
self.assertEqual(sorted(expected_cfg), sorted(result))
m_which.assert_has_calls([])
- @mock.patch('cloudinit.config.cc_ntp.util.which')
+ @mock.patch('cloudinit.config.cc_ntp.subp.which')
def test_ntp_user_config_overrides_system_cfg(self, m_which):
"""Test user-data overrides system_config ntp_client"""
system_client = 'chrony'
diff --git a/tests/unittests/test_handler/test_handler_puppet.py b/tests/unittests/test_handler/test_handler_puppet.py
index 04aa7d03..62388ac6 100644
--- a/tests/unittests/test_handler/test_handler_puppet.py
+++ b/tests/unittests/test_handler/test_handler_puppet.py
@@ -12,11 +12,11 @@ import textwrap
LOG = logging.getLogger(__name__)
-@mock.patch('cloudinit.config.cc_puppet.util')
+@mock.patch('cloudinit.config.cc_puppet.subp.subp')
@mock.patch('cloudinit.config.cc_puppet.os')
class TestAutostartPuppet(CiTestCase):
- def test_wb_autostart_puppet_updates_puppet_default(self, m_os, m_util):
+ def test_wb_autostart_puppet_updates_puppet_default(self, m_os, m_subp):
"""Update /etc/default/puppet to autostart if it exists."""
def _fake_exists(path):
@@ -27,9 +27,9 @@ class TestAutostartPuppet(CiTestCase):
self.assertEqual(
[mock.call(['sed', '-i', '-e', 's/^START=.*/START=yes/',
'/etc/default/puppet'], capture=False)],
- m_util.subp.call_args_list)
+ m_subp.call_args_list)
- def test_wb_autostart_pupppet_enables_puppet_systemctl(self, m_os, m_util):
+ def test_wb_autostart_pupppet_enables_puppet_systemctl(self, m_os, m_subp):
"""If systemctl is present, enable puppet via systemctl."""
def _fake_exists(path):
@@ -39,9 +39,9 @@ class TestAutostartPuppet(CiTestCase):
cc_puppet._autostart_puppet(LOG)
expected_calls = [mock.call(
['/bin/systemctl', 'enable', 'puppet.service'], capture=False)]
- self.assertEqual(expected_calls, m_util.subp.call_args_list)
+ self.assertEqual(expected_calls, m_subp.call_args_list)
- def test_wb_autostart_pupppet_enables_puppet_chkconfig(self, m_os, m_util):
+ def test_wb_autostart_pupppet_enables_puppet_chkconfig(self, m_os, m_subp):
"""If chkconfig is present, enable puppet via checkcfg."""
def _fake_exists(path):
@@ -51,7 +51,7 @@ class TestAutostartPuppet(CiTestCase):
cc_puppet._autostart_puppet(LOG)
expected_calls = [mock.call(
['/sbin/chkconfig', 'puppet', 'on'], capture=False)]
- self.assertEqual(expected_calls, m_util.subp.call_args_list)
+ self.assertEqual(expected_calls, m_subp.call_args_list)
@mock.patch('cloudinit.config.cc_puppet._autostart_puppet')
@@ -81,7 +81,7 @@ class TestPuppetHandle(CiTestCase):
"no 'puppet' configuration found", self.logs.getvalue())
self.assertEqual(0, m_auto.call_count)
- @mock.patch('cloudinit.config.cc_puppet.util.subp')
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp')
def test_handler_puppet_config_starts_puppet_service(self, m_subp, m_auto):
"""Cloud-config 'puppet' configuration starts puppet."""
mycloud = self._get_cloud('ubuntu')
@@ -92,7 +92,7 @@ class TestPuppetHandle(CiTestCase):
[mock.call(['service', 'puppet', 'start'], capture=False)],
m_subp.call_args_list)
- @mock.patch('cloudinit.config.cc_puppet.util.subp')
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp')
def test_handler_empty_puppet_config_installs_puppet(self, m_subp, m_auto):
"""Cloud-config empty 'puppet' configuration installs latest puppet."""
mycloud = self._get_cloud('ubuntu')
@@ -103,7 +103,7 @@ class TestPuppetHandle(CiTestCase):
[mock.call(('puppet', None))],
mycloud.distro.install_packages.call_args_list)
- @mock.patch('cloudinit.config.cc_puppet.util.subp')
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp')
def test_handler_puppet_config_installs_puppet_on_true(self, m_subp, _):
"""Cloud-config with 'puppet' key installs when 'install' is True."""
mycloud = self._get_cloud('ubuntu')
@@ -114,7 +114,7 @@ class TestPuppetHandle(CiTestCase):
[mock.call(('puppet', None))],
mycloud.distro.install_packages.call_args_list)
- @mock.patch('cloudinit.config.cc_puppet.util.subp')
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp')
def test_handler_puppet_config_installs_puppet_version(self, m_subp, _):
"""Cloud-config 'puppet' configuration can specify a version."""
mycloud = self._get_cloud('ubuntu')
@@ -125,7 +125,7 @@ class TestPuppetHandle(CiTestCase):
[mock.call(('puppet', '3.8'))],
mycloud.distro.install_packages.call_args_list)
- @mock.patch('cloudinit.config.cc_puppet.util.subp')
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp')
def test_handler_puppet_config_updates_puppet_conf(self, m_subp, m_auto):
"""When 'conf' is provided update values in PUPPET_CONF_PATH."""
mycloud = self._get_cloud('ubuntu')
@@ -141,7 +141,7 @@ class TestPuppetHandle(CiTestCase):
expected = '[agent]\nserver = puppetmaster.example.org\nother = 3\n\n'
self.assertEqual(expected, content)
- @mock.patch('cloudinit.config.cc_puppet.util.subp')
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp')
def test_handler_puppet_writes_csr_attributes_file(self, m_subp, m_auto):
"""When csr_attributes is provided
creates file in PUPPET_CSR_ATTRIBUTES_PATH."""
@@ -149,15 +149,20 @@ class TestPuppetHandle(CiTestCase):
mycloud.distro = mock.MagicMock()
cfg = {
'puppet': {
- 'csr_attributes': {
- 'custom_attributes': {
- '1.2.840.113549.1.9.7': '342thbjkt82094y0ut'
- 'hhor289jnqthpc2290'},
- 'extension_requests': {
- 'pp_uuid': 'ED803750-E3C7-44F5-BB08-41A04433FE2E',
- 'pp_image_name': 'my_ami_image',
- 'pp_preshared_key': '342thbjkt82094y0uthhor289jnqthpc2290'}
- }}}
+ 'csr_attributes': {
+ 'custom_attributes': {
+ '1.2.840.113549.1.9.7':
+ '342thbjkt82094y0uthhor289jnqthpc2290'
+ },
+ 'extension_requests': {
+ 'pp_uuid': 'ED803750-E3C7-44F5-BB08-41A04433FE2E',
+ 'pp_image_name': 'my_ami_image',
+ 'pp_preshared_key':
+ '342thbjkt82094y0uthhor289jnqthpc2290'
+ }
+ }
+ }
+ }
csr_attributes = 'cloudinit.config.cc_puppet.' \
'PUPPET_CSR_ATTRIBUTES_PATH'
with mock.patch(csr_attributes, self.csr_attributes_path):
diff --git a/tests/unittests/test_handler/test_handler_runcmd.py b/tests/unittests/test_handler/test_handler_runcmd.py
index 9ce334ac..73237d68 100644
--- a/tests/unittests/test_handler/test_handler_runcmd.py
+++ b/tests/unittests/test_handler/test_handler_runcmd.py
@@ -2,7 +2,7 @@
from cloudinit.config.cc_runcmd import handle, schema
from cloudinit.sources import DataSourceNone
-from cloudinit import (distros, helpers, cloud, util)
+from cloudinit import (distros, helpers, cloud, subp, util)
from cloudinit.tests.helpers import (
CiTestCase, FilesystemMockingTestCase, SchemaTestCaseMixin,
skipUnlessJsonSchema)
@@ -20,7 +20,7 @@ class TestRuncmd(FilesystemMockingTestCase):
def setUp(self):
super(TestRuncmd, self).setUp()
- self.subp = util.subp
+ self.subp = subp.subp
self.new_root = self.tmp_dir()
def _get_cloud(self, distro):
diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py
index abecc53b..85167f19 100644
--- a/tests/unittests/test_handler/test_handler_seed_random.py
+++ b/tests/unittests/test_handler/test_handler_seed_random.py
@@ -17,6 +17,7 @@ from io import BytesIO
from cloudinit import cloud
from cloudinit import distros
from cloudinit import helpers
+from cloudinit import subp
from cloudinit import util
from cloudinit.sources import DataSourceNone
@@ -35,8 +36,8 @@ class TestRandomSeed(t_help.TestCase):
self.unapply = []
# by default 'which' has nothing in its path
- self.apply_patches([(util, 'which', self._which)])
- self.apply_patches([(util, 'subp', self._subp)])
+ self.apply_patches([(subp, 'which', self._which)])
+ self.apply_patches([(subp, 'subp', self._subp)])
self.subp_called = []
self.whichdata = {}
diff --git a/tests/unittests/test_handler/test_handler_spacewalk.py b/tests/unittests/test_handler/test_handler_spacewalk.py
index 410e6f77..26f7648f 100644
--- a/tests/unittests/test_handler/test_handler_spacewalk.py
+++ b/tests/unittests/test_handler/test_handler_spacewalk.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit.config import cc_spacewalk
-from cloudinit import util
+from cloudinit import subp
from cloudinit.tests import helpers
@@ -19,20 +19,20 @@ class TestSpacewalk(helpers.TestCase):
}
}
- @mock.patch("cloudinit.config.cc_spacewalk.util.subp")
- def test_not_is_registered(self, mock_util_subp):
- mock_util_subp.side_effect = util.ProcessExecutionError(exit_code=1)
+ @mock.patch("cloudinit.config.cc_spacewalk.subp.subp")
+ def test_not_is_registered(self, mock_subp):
+ mock_subp.side_effect = subp.ProcessExecutionError(exit_code=1)
self.assertFalse(cc_spacewalk.is_registered())
- @mock.patch("cloudinit.config.cc_spacewalk.util.subp")
- def test_is_registered(self, mock_util_subp):
- mock_util_subp.side_effect = None
+ @mock.patch("cloudinit.config.cc_spacewalk.subp.subp")
+ def test_is_registered(self, mock_subp):
+ mock_subp.side_effect = None
self.assertTrue(cc_spacewalk.is_registered())
- @mock.patch("cloudinit.config.cc_spacewalk.util.subp")
- def test_do_register(self, mock_util_subp):
+ @mock.patch("cloudinit.config.cc_spacewalk.subp.subp")
+ def test_do_register(self, mock_subp):
cc_spacewalk.do_register(**self.space_cfg['spacewalk'])
- mock_util_subp.assert_called_with([
+ mock_subp.assert_called_with([
'rhnreg_ks',
'--serverUrl', 'https://localhost/XMLRPC',
'--profilename', 'test',
diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py
index e19d13b8..99f0b06c 100644
--- a/tests/unittests/test_handler/test_schema.py
+++ b/tests/unittests/test_handler/test_schema.py
@@ -34,7 +34,8 @@ class GetSchemaTest(CiTestCase):
'cc_ubuntu_advantage',
'cc_ubuntu_drivers',
'cc_write_files',
- 'cc_zypper_add_repo'
+ 'cc_zypper_add_repo',
+ 'cc_chef'
],
[subschema['id'] for subschema in schema['allOf']])
self.assertEqual('cloud-config-schema', schema['id'])
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 84d3a5f0..54cc8469 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -8,6 +8,7 @@ from cloudinit.net import (
renderers, sysconfig)
from cloudinit.sources.helpers import openstack
from cloudinit import temp_utils
+from cloudinit import subp
from cloudinit import util
from cloudinit import safeyaml as yaml
@@ -425,6 +426,11 @@ network:
mtu: 9000
parameters:
gratuitous-arp: 2
+ bond2:
+ interfaces:
+ - ens5
+ macaddress: 68:05:ca:64:d3:6e
+ mtu: 9000
ethernets:
ens3:
dhcp4: false
@@ -436,6 +442,11 @@ network:
dhcp6: false
match:
macaddress: 52:54:00:11:22:ff
+ ens5:
+ dhcp4: false
+ dhcp6: false
+ match:
+ macaddress: 52:54:00:99:11:99
version: 2
"""
@@ -3192,7 +3203,7 @@ USERCTL=no
def test_check_ifcfg_rh(self):
"""ifcfg-rh plugin is added NetworkManager.conf if conf present."""
render_dir = self.tmp_dir()
- nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file)
+ nm_cfg = subp.target_path(render_dir, path=self.nm_cfg_file)
util.ensure_dir(os.path.dirname(nm_cfg))
# write a template nm.conf, note plugins is a list here
@@ -3215,7 +3226,7 @@ USERCTL=no
"""ifcfg-rh plugin is append when plugins is a string."""
render_dir = self.tmp_path("render")
os.makedirs(render_dir)
- nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file)
+ nm_cfg = subp.target_path(render_dir, path=self.nm_cfg_file)
util.ensure_dir(os.path.dirname(nm_cfg))
# write a template nm.conf, note plugins is a value here
@@ -3240,7 +3251,7 @@ USERCTL=no
"""enable_ifcfg_plugin creates plugins value if missing."""
render_dir = self.tmp_path("render")
os.makedirs(render_dir)
- nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file)
+ nm_cfg = subp.target_path(render_dir, path=self.nm_cfg_file)
util.ensure_dir(os.path.dirname(nm_cfg))
# write a template nm.conf, note plugins is missing
@@ -3332,7 +3343,7 @@ USERCTL=no
USERCTL=no
VLAN=yes
""")
- }
+ }
self._compare_files_to_expected(
expected, self._render_and_read(network_config=v2data))
@@ -3406,7 +3417,7 @@ USERCTL=no
TYPE=Ethernet
USERCTL=no
"""),
- }
+ }
for dhcp_ver in ('dhcp4', 'dhcp6'):
v2data = copy.deepcopy(v2base)
if dhcp_ver == 'dhcp6':
@@ -3920,7 +3931,7 @@ class TestNetplanCleanDefault(CiTestCase):
files = sorted(populate_dir(tmpd, content))
netplan._clean_default(target=tmpd)
found = [t for t in files if os.path.exists(t)]
- expected = [util.target_path(tmpd, f) for f in (astamp, anet, ayaml)]
+ expected = [subp.target_path(tmpd, f) for f in (astamp, anet, ayaml)]
self.assertEqual(sorted(expected), found)
@@ -3933,7 +3944,7 @@ class TestNetplanPostcommands(CiTestCase):
@mock.patch.object(netplan.Renderer, '_netplan_generate')
@mock.patch.object(netplan.Renderer, '_net_setup_link')
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_netplan_render_calls_postcmds(self, mock_subp,
mock_netplan_generate,
mock_net_setup_link):
@@ -3947,7 +3958,7 @@ class TestNetplanPostcommands(CiTestCase):
render_target = 'netplan.yaml'
renderer = netplan.Renderer(
{'netplan_path': render_target, 'postcmds': True})
- mock_subp.side_effect = iter([util.ProcessExecutionError])
+ mock_subp.side_effect = iter([subp.ProcessExecutionError])
renderer.render_network_state(ns, target=render_dir)
mock_netplan_generate.assert_called_with(run=True)
@@ -3955,7 +3966,7 @@ class TestNetplanPostcommands(CiTestCase):
@mock.patch('cloudinit.util.SeLinuxGuard')
@mock.patch.object(netplan, "get_devicelist")
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_netplan_postcmds(self, mock_subp, mock_devlist, mock_sel):
mock_sel.__enter__ = mock.Mock(return_value=False)
mock_sel.__exit__ = mock.Mock()
@@ -3971,7 +3982,7 @@ class TestNetplanPostcommands(CiTestCase):
renderer = netplan.Renderer(
{'netplan_path': render_target, 'postcmds': True})
mock_subp.side_effect = iter([
- util.ProcessExecutionError,
+ subp.ProcessExecutionError,
('', ''),
('', ''),
])
@@ -4260,7 +4271,7 @@ class TestNetplanRoundTrip(CiTestCase):
def setUp(self):
super(TestNetplanRoundTrip, self).setUp()
- self.add_patch('cloudinit.net.netplan.util.subp', 'm_subp')
+ self.add_patch('cloudinit.net.netplan.subp.subp', 'm_subp')
self.m_subp.return_value = (self.NETPLAN_INFO_OUT, '')
def _render_and_read(self, network_config=None, state=None,
@@ -4765,13 +4776,13 @@ class TestNetRenderers(CiTestCase):
def test_sysconfig_available_uses_variant_mapping(self, m_distro, m_avail):
m_avail.return_value = True
distro_values = [
- ('opensuse', '', ''),
- ('opensuse-leap', '', ''),
- ('opensuse-tumbleweed', '', ''),
- ('sles', '', ''),
- ('centos', '', ''),
- ('fedora', '', ''),
- ('redhat', '', ''),
+ ('opensuse', '', ''),
+ ('opensuse-leap', '', ''),
+ ('opensuse-tumbleweed', '', ''),
+ ('sles', '', ''),
+ ('centos', '', ''),
+ ('fedora', '', ''),
+ ('redhat', '', ''),
]
for (distro_name, distro_version, flavor) in distro_values:
m_distro.return_value = (distro_name, distro_version, flavor)
@@ -5157,7 +5168,7 @@ def _gzip_data(data):
class TestRenameInterfaces(CiTestCase):
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_rename_all(self, mock_subp):
renames = [
('00:11:22:33:44:55', 'interface0', 'virtio_net', '0x3'),
@@ -5188,7 +5199,7 @@ class TestRenameInterfaces(CiTestCase):
capture=True),
])
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_rename_no_driver_no_device_id(self, mock_subp):
renames = [
('00:11:22:33:44:55', 'interface0', None, None),
@@ -5219,7 +5230,7 @@ class TestRenameInterfaces(CiTestCase):
capture=True),
])
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_rename_all_bounce(self, mock_subp):
renames = [
('00:11:22:33:44:55', 'interface0', 'virtio_net', '0x3'),
@@ -5254,7 +5265,7 @@ class TestRenameInterfaces(CiTestCase):
mock.call(['ip', 'link', 'set', 'interface2', 'up'], capture=True)
])
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_rename_duplicate_macs(self, mock_subp):
renames = [
('00:11:22:33:44:55', 'eth0', 'hv_netsvc', '0x3'),
@@ -5283,7 +5294,7 @@ class TestRenameInterfaces(CiTestCase):
capture=True),
])
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_rename_duplicate_macs_driver_no_devid(self, mock_subp):
renames = [
('00:11:22:33:44:55', 'eth0', 'hv_netsvc', None),
@@ -5312,7 +5323,7 @@ class TestRenameInterfaces(CiTestCase):
capture=True),
])
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_rename_multi_mac_dups(self, mock_subp):
renames = [
('00:11:22:33:44:55', 'eth0', 'hv_netsvc', '0x3'),
@@ -5351,7 +5362,7 @@ class TestRenameInterfaces(CiTestCase):
capture=True),
])
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_rename_macs_case_insensitive(self, mock_subp):
"""_rename_interfaces must support upper or lower case macs."""
renames = [
diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py
index 48296c30..414b4830 100644
--- a/tests/unittests/test_net_freebsd.py
+++ b/tests/unittests/test_net_freebsd.py
@@ -7,7 +7,7 @@ SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output")
class TestInterfacesByMac(CiTestCase):
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
@mock.patch('cloudinit.util.is_FreeBSD')
def test_get_interfaces_by_mac(self, mock_is_FreeBSD, mock_subp):
mock_is_FreeBSD.return_value = True
diff --git a/tests/unittests/test_render_cloudcfg.py b/tests/unittests/test_render_cloudcfg.py
index 8b1e6042..495e2669 100644
--- a/tests/unittests/test_render_cloudcfg.py
+++ b/tests/unittests/test_render_cloudcfg.py
@@ -5,6 +5,7 @@ import sys
import pytest
+from cloudinit import subp
from cloudinit import util
# TODO(Look to align with tools.render-cloudcfg or cloudinit.distos.OSFAMILIES)
@@ -12,6 +13,7 @@ DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd",
"netbsd", "openbsd", "rhel", "suse", "ubuntu", "unknown"]
+@pytest.mark.allow_subp_for(sys.executable)
class TestRenderCloudCfg:
cmd = [sys.executable, os.path.realpath('tools/render-cloudcfg')]
@@ -20,7 +22,7 @@ class TestRenderCloudCfg:
@pytest.mark.parametrize('variant', (DISTRO_VARIANTS))
def test_variant_sets_distro_in_cloud_cfg(self, variant, tmpdir):
outfile = tmpdir.join('outcfg').strpath
- util.subp(
+ subp.subp(
self.cmd + ['--variant', variant, self.tmpl_path, outfile])
with open(outfile) as stream:
system_cfg = util.load_yaml(stream.read())
@@ -31,7 +33,7 @@ class TestRenderCloudCfg:
@pytest.mark.parametrize('variant', (DISTRO_VARIANTS))
def test_variant_sets_default_user_in_cloud_cfg(self, variant, tmpdir):
outfile = tmpdir.join('outcfg').strpath
- util.subp(
+ subp.subp(
self.cmd + ['--variant', variant, self.tmpl_path, outfile])
with open(outfile) as stream:
system_cfg = util.load_yaml(stream.read())
@@ -49,7 +51,7 @@ class TestRenderCloudCfg:
self, variant, renderers, tmpdir
):
outfile = tmpdir.join('outcfg').strpath
- util.subp(
+ subp.subp(
self.cmd + ['--variant', variant, self.tmpl_path, outfile])
with open(outfile) as stream:
system_cfg = util.load_yaml(stream.read())
diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py
index b3e083c6..b60a66ab 100644
--- a/tests/unittests/test_reporting_hyperv.py
+++ b/tests/unittests/test_reporting_hyperv.py
@@ -93,10 +93,15 @@ class TextKvpReporter(CiTestCase):
def test_not_truncate_kvp_file_modified_after_boot(self):
with open(self.tmp_file_path, "wb+") as f:
kvp = {'key': 'key1', 'value': 'value1'}
- data = (struct.pack("%ds%ds" % (
+ data = struct.pack(
+ "%ds%ds"
+ % (
HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
- HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
- kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8')))
+ HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE,
+ ),
+ kvp["key"].encode("utf-8"),
+ kvp["value"].encode("utf-8"),
+ )
f.write(data)
cur_time = time.time()
os.utime(self.tmp_file_path, (cur_time, cur_time))
@@ -131,7 +136,7 @@ class TextKvpReporter(CiTestCase):
self.assertEqual(0, len(kvps))
@mock.patch('cloudinit.distros.uses_systemd')
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_get_boot_telemetry(self, m_subp, m_sysd):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
datetime_pattern = r"\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]"
diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py
index 4cd27eed..53d3cd5a 100644
--- a/tests/unittests/test_rh_subscription.py
+++ b/tests/unittests/test_rh_subscription.py
@@ -6,7 +6,7 @@ import copy
import logging
from cloudinit.config import cc_rh_subscription
-from cloudinit import util
+from cloudinit import subp
from cloudinit.tests.helpers import CiTestCase, mock
@@ -56,7 +56,7 @@ class GoodTests(CiTestCase):
'''
reg = "The system has been registered with ID:" \
" 12345678-abde-abcde-1234-1234567890abc"
- m_sman_cli.side_effect = [util.ProcessExecutionError, (reg, 'bar')]
+ m_sman_cli.side_effect = [subp.ProcessExecutionError, (reg, 'bar')]
self.handle(self.name, self.config, self.cloud_init,
self.log, self.args)
self.assertIn(mock.call(['identity']), m_sman_cli.call_args_list)
@@ -93,7 +93,7 @@ class GoodTests(CiTestCase):
reg = "The system has been registered with ID:" \
" 12345678-abde-abcde-1234-1234567890abc"
m_sman_cli.side_effect = [
- util.ProcessExecutionError,
+ subp.ProcessExecutionError,
(reg, 'bar'),
('Service level set to: self-support', ''),
('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''),
@@ -161,7 +161,7 @@ class TestBadInput(CiTestCase):
def test_no_password(self, m_sman_cli):
'''Attempt to register without the password key/value.'''
- m_sman_cli.side_effect = [util.ProcessExecutionError,
+ m_sman_cli.side_effect = [subp.ProcessExecutionError,
(self.reg, 'bar')]
self.handle(self.name, self.config_no_password, self.cloud_init,
self.log, self.args)
@@ -169,7 +169,7 @@ class TestBadInput(CiTestCase):
def test_no_org(self, m_sman_cli):
'''Attempt to register without the org key/value.'''
- m_sman_cli.side_effect = [util.ProcessExecutionError]
+ m_sman_cli.side_effect = [subp.ProcessExecutionError]
self.handle(self.name, self.config_no_key, self.cloud_init,
self.log, self.args)
m_sman_cli.assert_called_with(['identity'])
@@ -182,7 +182,7 @@ class TestBadInput(CiTestCase):
def test_service_level_without_auto(self, m_sman_cli):
'''Attempt to register using service-level without auto-attach key.'''
- m_sman_cli.side_effect = [util.ProcessExecutionError,
+ m_sman_cli.side_effect = [subp.ProcessExecutionError,
(self.reg, 'bar')]
self.handle(self.name, self.config_service, self.cloud_init,
self.log, self.args)
@@ -195,7 +195,7 @@ class TestBadInput(CiTestCase):
'''
Register with pools that are not in the format of a list
'''
- m_sman_cli.side_effect = [util.ProcessExecutionError,
+ m_sman_cli.side_effect = [subp.ProcessExecutionError,
(self.reg, 'bar')]
self.handle(self.name, self.config_badpool, self.cloud_init,
self.log, self.args)
@@ -208,7 +208,7 @@ class TestBadInput(CiTestCase):
'''
Register with repos that are not in the format of a list
'''
- m_sman_cli.side_effect = [util.ProcessExecutionError,
+ m_sman_cli.side_effect = [subp.ProcessExecutionError,
(self.reg, 'bar')]
self.handle(self.name, self.config_badrepo, self.cloud_init,
self.log, self.args)
@@ -222,7 +222,7 @@ class TestBadInput(CiTestCase):
'''
Attempt to register with a key that we don't know
'''
- m_sman_cli.side_effect = [util.ProcessExecutionError,
+ m_sman_cli.side_effect = [subp.ProcessExecutionError,
(self.reg, 'bar')]
self.handle(self.name, self.config_badkey, self.cloud_init,
self.log, self.args)
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index b4767f0c..d15fc60b 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -374,13 +374,13 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
sshd_config = self.tmp_path('sshd_config')
util.write_file(
- sshd_config,
- "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys))
+ sshd_config,
+ "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)
+ )
(auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config)
- content = ssh_util.update_authorized_keys(
- auth_key_entries, [])
+ fpw.pw_name, sshd_config)
+ content = ssh_util.update_authorized_keys(auth_key_entries, [])
self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn)
self.assertTrue(VALID_CONTENT['rsa'] in content)
@@ -398,11 +398,13 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
sshd_config = self.tmp_path('sshd_config')
util.write_file(
- sshd_config,
- "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys))
+ sshd_config,
+ "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)
+ )
(auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config)
+ fpw.pw_name, sshd_config
+ )
content = ssh_util.update_authorized_keys(auth_key_entries, [])
self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn)
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 4b439267..409dba61 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -1,25 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
import io
-import json
import logging
import os
import re
import shutil
import stat
-import sys
import tempfile
import yaml
from unittest import mock
+from cloudinit import subp
from cloudinit import importer, util
from cloudinit.tests import helpers
-BASH = util.which('bash')
-BOGUS_COMMAND = 'this-is-not-expected-to-be-a-program-name'
-
-
class FakeSelinux(object):
def __init__(self, match_what):
@@ -103,6 +98,17 @@ class TestWriteFile(helpers.TestCase):
self.assertTrue(os.path.isdir(dirname))
self.assertTrue(os.path.isfile(path))
+ def test_dir_is_not_created_if_ensure_dir_false(self):
+ """Verify directories are not created if ensure_dir_exists is False."""
+ dirname = os.path.join(self.tmp, "subdir")
+ path = os.path.join(dirname, "NewFile.txt")
+ contents = "Hey there"
+
+ with self.assertRaises(FileNotFoundError):
+ util.write_file(path, contents, ensure_dir_exists=False)
+
+ self.assertFalse(os.path.isdir(dirname))
+
def test_explicit_mode(self):
"""Verify explicit file mode works properly."""
path = os.path.join(self.tmp, "NewFile.txt")
@@ -115,29 +121,29 @@ class TestWriteFile(helpers.TestCase):
file_stat = os.stat(path)
self.assertEqual(0o666, stat.S_IMODE(file_stat.st_mode))
- def test_copy_mode_no_existing(self):
- """Verify that file is created with mode 0o644 if copy_mode
+ def test_preserve_mode_no_existing(self):
+ """Verify that file is created with mode 0o644 if preserve_mode
is true and there is no prior existing file."""
path = os.path.join(self.tmp, "NewFile.txt")
contents = "Hey there"
- util.write_file(path, contents, copy_mode=True)
+ util.write_file(path, contents, preserve_mode=True)
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.isfile(path))
file_stat = os.stat(path)
self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
- def test_copy_mode_with_existing(self):
+ def test_preserve_mode_with_existing(self):
"""Verify that file is created using mode of existing file
- if copy_mode is true."""
+ if preserve_mode is true."""
path = os.path.join(self.tmp, "NewFile.txt")
contents = "Hey there"
open(path, 'w').close()
os.chmod(path, 0o666)
- util.write_file(path, contents, copy_mode=True)
+ util.write_file(path, contents, preserve_mode=True)
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.isfile(path))
@@ -385,7 +391,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
self.assertEqual(expected, util.parse_mount_info('/run/lock', lines))
@mock.patch('cloudinit.util.os')
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_get_device_info_from_zpool(self, zpool_output, m_os):
# mock /dev/zfs exists
m_os.path.exists.return_value = True
@@ -408,17 +414,17 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
self.assertIsNone(ret)
@mock.patch('cloudinit.util.os')
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_get_device_info_from_zpool_handles_no_zpool(self, m_sub, m_os):
"""Handle case where there is no zpool command"""
# mock /dev/zfs exists
m_os.path.exists.return_value = True
- m_sub.side_effect = util.ProcessExecutionError("No zpool cmd")
+ m_sub.side_effect = subp.ProcessExecutionError("No zpool cmd")
ret = util.get_device_info_from_zpool('vmzroot')
self.assertIsNone(ret)
@mock.patch('cloudinit.util.os')
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_get_device_info_from_zpool_on_error(self, zpool_output, m_os):
# mock /dev/zfs exists
m_os.path.exists.return_value = True
@@ -430,7 +436,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
ret = util.get_device_info_from_zpool('vmzroot')
self.assertIsNone(ret)
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_parse_mount_with_ext(self, mount_out):
mount_out.return_value = (
helpers.readResource('mount_parse_ext.txt'), '')
@@ -447,7 +453,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
ret = util.parse_mount('/not/existing/mount')
self.assertIsNone(ret)
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_parse_mount_with_zfs(self, mount_out):
mount_out.return_value = (
helpers.readResource('mount_parse_zfs.txt'), '')
@@ -513,13 +519,13 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
"""
def _dmidecode_subp(cmd):
if cmd[-1] != key:
- raise util.ProcessExecutionError()
+ raise subp.ProcessExecutionError()
return (content, error)
self.patched_funcs.enter_context(
- mock.patch.object(util, 'which', lambda _: True))
+ mock.patch("cloudinit.subp.which", side_effect=lambda _: True))
self.patched_funcs.enter_context(
- mock.patch.object(util, 'subp', _dmidecode_subp))
+ mock.patch("cloudinit.subp.subp", side_effect=_dmidecode_subp))
def patch_mapping(self, new_mapping):
self.patched_funcs.enter_context(
@@ -546,10 +552,12 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
def test_dmidecode_not_used_on_arm(self):
self.patch_mapping({})
+ print("current =%s", subp)
self._create_sysfs_parent_directory()
dmi_val = 'from-dmidecode'
dmi_name = 'use-dmidecode'
self._configure_dmidecode_return(dmi_name, dmi_val)
+ print("now =%s", subp)
expected = {'armel': None, 'aarch64': dmi_val, 'x86_64': dmi_val}
found = {}
@@ -560,6 +568,7 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
for arch in expected:
m_uname.return_value = ('x-sysname', 'x-nodename',
'x-release', 'x-version', arch)
+ print("now2 =%s", subp)
found[arch] = util.read_dmi_data(dmi_name)
self.assertEqual(expected, found)
@@ -570,7 +579,7 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
def test_none_returned_if_dmidecode_not_in_path(self):
self.patched_funcs.enter_context(
- mock.patch.object(util, 'which', lambda _: False))
+ mock.patch.object(subp, 'which', lambda _: False))
self.patch_mapping({})
self.assertIsNone(util.read_dmi_data('expect-fail'))
@@ -734,218 +743,6 @@ class TestReadSeeded(helpers.TestCase):
self.assertEqual(found_ud, ud)
-class TestSubp(helpers.CiTestCase):
- allowed_subp = [BASH, 'cat', helpers.CiTestCase.SUBP_SHELL_TRUE,
- BOGUS_COMMAND, sys.executable]
-
- stdin2err = [BASH, '-c', 'cat >&2']
- stdin2out = ['cat']
- utf8_invalid = b'ab\xaadef'
- utf8_valid = b'start \xc3\xa9 end'
- utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7'
- printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--']
-
- def printf_cmd(self, *args):
- # bash's printf supports \xaa. So does /usr/bin/printf
- # but by using bash, we remove dependency on another program.
- return([BASH, '-c', 'printf "$@"', 'printf'] + list(args))
-
- def test_subp_handles_bytestrings(self):
- """subp can run a bytestring command if shell is True."""
- tmp_file = self.tmp_path('test.out')
- cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file)
- (out, _err) = util.subp(cmd.encode('utf-8'), shell=True)
- self.assertEqual(u'', out)
- self.assertEqual(u'', _err)
- self.assertEqual('HI MOM\n', util.load_file(tmp_file))
-
- def test_subp_handles_strings(self):
- """subp can run a string command if shell is True."""
- tmp_file = self.tmp_path('test.out')
- cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file)
- (out, _err) = util.subp(cmd, shell=True)
- self.assertEqual(u'', out)
- self.assertEqual(u'', _err)
- self.assertEqual('HI MOM\n', util.load_file(tmp_file))
-
- def test_subp_handles_utf8(self):
- # The given bytes contain utf-8 accented characters as seen in e.g.
- # the "deja dup" package in Ubuntu.
- cmd = self.printf_cmd(self.utf8_valid_2)
- (out, _err) = util.subp(cmd, capture=True)
- self.assertEqual(out, self.utf8_valid_2.decode('utf-8'))
-
- def test_subp_respects_decode_false(self):
- (out, err) = util.subp(self.stdin2out, capture=True, decode=False,
- data=self.utf8_valid)
- self.assertTrue(isinstance(out, bytes))
- self.assertTrue(isinstance(err, bytes))
- self.assertEqual(out, self.utf8_valid)
-
- def test_subp_decode_ignore(self):
- # this executes a string that writes invalid utf-8 to stdout
- (out, _err) = util.subp(self.printf_cmd('abc\\xaadef'),
- capture=True, decode='ignore')
- self.assertEqual(out, 'abcdef')
-
- def test_subp_decode_strict_valid_utf8(self):
- (out, _err) = util.subp(self.stdin2out, capture=True,
- decode='strict', data=self.utf8_valid)
- self.assertEqual(out, self.utf8_valid.decode('utf-8'))
-
- def test_subp_decode_invalid_utf8_replaces(self):
- (out, _err) = util.subp(self.stdin2out, capture=True,
- data=self.utf8_invalid)
- expected = self.utf8_invalid.decode('utf-8', 'replace')
- self.assertEqual(out, expected)
-
- def test_subp_decode_strict_raises(self):
- args = []
- kwargs = {'args': self.stdin2out, 'capture': True,
- 'decode': 'strict', 'data': self.utf8_invalid}
- self.assertRaises(UnicodeDecodeError, util.subp, *args, **kwargs)
-
- def test_subp_capture_stderr(self):
- data = b'hello world'
- (out, err) = util.subp(self.stdin2err, capture=True,
- decode=False, data=data,
- update_env={'LC_ALL': 'C'})
- self.assertEqual(err, data)
- self.assertEqual(out, b'')
-
- def test_subp_reads_env(self):
- with mock.patch.dict("os.environ", values={'FOO': 'BAR'}):
- out, _err = util.subp(self.printenv + ['FOO'], capture=True)
- self.assertEqual('FOO=BAR', out.splitlines()[0])
-
- def test_subp_env_and_update_env(self):
- out, _err = util.subp(
- self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True,
- env={'FOO': 'BAR'},
- update_env={'HOME': '/myhome', 'K2': 'V2'})
- self.assertEqual(
- ['FOO=BAR', 'HOME=/myhome', 'K1=', 'K2=V2'], out.splitlines())
-
- def test_subp_update_env(self):
- extra = {'FOO': 'BAR', 'HOME': '/root', 'K1': 'V1'}
- with mock.patch.dict("os.environ", values=extra):
- out, _err = util.subp(
- self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True,
- update_env={'HOME': '/myhome', 'K2': 'V2'})
-
- self.assertEqual(
- ['FOO=BAR', 'HOME=/myhome', 'K1=V1', 'K2=V2'], out.splitlines())
-
- def test_subp_warn_missing_shebang(self):
- """Warn on no #! in script"""
- noshebang = self.tmp_path('noshebang')
- util.write_file(noshebang, 'true\n')
-
- os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC)
- with self.allow_subp([noshebang]):
- self.assertRaisesRegex(util.ProcessExecutionError,
- r'Missing #! in script\?',
- util.subp, (noshebang,))
-
- def test_subp_combined_stderr_stdout(self):
- """Providing combine_capture as True redirects stderr to stdout."""
- data = b'hello world'
- (out, err) = util.subp(self.stdin2err, capture=True,
- combine_capture=True, decode=False, data=data)
- self.assertEqual(b'', err)
- self.assertEqual(data, out)
-
- def test_returns_none_if_no_capture(self):
- (out, err) = util.subp(self.stdin2out, data=b'', capture=False)
- self.assertIsNone(err)
- self.assertIsNone(out)
-
- def test_exception_has_out_err_are_bytes_if_decode_false(self):
- """Raised exc should have stderr, stdout as bytes if no decode."""
- with self.assertRaises(util.ProcessExecutionError) as cm:
- util.subp([BOGUS_COMMAND], decode=False)
- self.assertTrue(isinstance(cm.exception.stdout, bytes))
- self.assertTrue(isinstance(cm.exception.stderr, bytes))
-
- def test_exception_has_out_err_are_bytes_if_decode_true(self):
- """Raised exc should have stderr, stdout as string if no decode."""
- with self.assertRaises(util.ProcessExecutionError) as cm:
- util.subp([BOGUS_COMMAND], decode=True)
- self.assertTrue(isinstance(cm.exception.stdout, str))
- self.assertTrue(isinstance(cm.exception.stderr, str))
-
- def test_bunch_of_slashes_in_path(self):
- self.assertEqual("/target/my/path/",
- util.target_path("/target/", "//my/path/"))
- self.assertEqual("/target/my/path/",
- util.target_path("/target/", "///my/path/"))
-
- def test_c_lang_can_take_utf8_args(self):
- """Independent of system LC_CTYPE, args can contain utf-8 strings.
-
- When python starts up, its default encoding gets set based on
- the value of LC_CTYPE. If no system locale is set, the default
- encoding for both python2 and python3 in some paths will end up
- being ascii.
-
- Attempts to use setlocale or patching (or changing) os.environ
- in the current environment seem to not be effective.
-
- This test starts up a python with LC_CTYPE set to C so that
- the default encoding will be set to ascii. In such an environment
- Popen(['command', 'non-ascii-arg']) would cause a UnicodeDecodeError.
- """
- python_prog = '\n'.join([
- 'import json, sys',
- 'from cloudinit.util import subp',
- 'data = sys.stdin.read()',
- 'cmd = json.loads(data)',
- 'subp(cmd, capture=False)',
- ''])
- cmd = [BASH, '-c', 'echo -n "$@"', '--',
- self.utf8_valid.decode("utf-8")]
- python_subp = [sys.executable, '-c', python_prog]
-
- out, _err = util.subp(
- python_subp, update_env={'LC_CTYPE': 'C'},
- data=json.dumps(cmd).encode("utf-8"),
- decode=False)
- self.assertEqual(self.utf8_valid, out)
-
- def test_bogus_command_logs_status_messages(self):
- """status_cb gets status messages logs on bogus commands provided."""
- logs = []
-
- def status_cb(log):
- logs.append(log)
-
- with self.assertRaises(util.ProcessExecutionError):
- util.subp([BOGUS_COMMAND], status_cb=status_cb)
-
- expected = [
- 'Begin run command: {cmd}\n'.format(cmd=BOGUS_COMMAND),
- 'ERROR: End run command: invalid command provided\n']
- self.assertEqual(expected, logs)
-
- def test_command_logs_exit_codes_to_status_cb(self):
- """status_cb gets status messages containing command exit code."""
- logs = []
-
- def status_cb(log):
- logs.append(log)
-
- with self.assertRaises(util.ProcessExecutionError):
- util.subp([BASH, '-c', 'exit 2'], status_cb=status_cb)
- util.subp([BASH, '-c', 'exit 0'], status_cb=status_cb)
-
- expected = [
- 'Begin run command: %s -c exit 2\n' % BASH,
- 'ERROR: End run command: exit(2)\n',
- 'Begin run command: %s -c exit 0\n' % BASH,
- 'End run command: exit(0)\n']
- self.assertEqual(expected, logs)
-
-
class TestEncode(helpers.TestCase):
"""Test the encoding functions"""
def test_decode_binary_plain_text_with_hex(self):
@@ -966,7 +763,7 @@ class TestProcessExecutionError(helpers.TestCase):
empty_description = 'Unexpected error while running command.'
def test_pexec_error_indent_text(self):
- error = util.ProcessExecutionError()
+ error = subp.ProcessExecutionError()
msg = 'abc\ndef'
formatted = 'abc\n{0}def'.format(' ' * 4)
self.assertEqual(error._indent_text(msg, indent_level=4), formatted)
@@ -976,10 +773,10 @@ class TestProcessExecutionError(helpers.TestCase):
error._indent_text(msg.encode()), type(msg.encode()))
def test_pexec_error_type(self):
- self.assertIsInstance(util.ProcessExecutionError(), IOError)
+ self.assertIsInstance(subp.ProcessExecutionError(), IOError)
def test_pexec_error_empty_msgs(self):
- error = util.ProcessExecutionError()
+ error = subp.ProcessExecutionError()
self.assertTrue(all(attr == self.empty_attr for attr in
(error.stderr, error.stdout, error.reason)))
self.assertEqual(error.description, self.empty_description)
@@ -993,7 +790,7 @@ class TestProcessExecutionError(helpers.TestCase):
stderr_msg = 'error error'
cmd = 'test command'
exit_code = 3
- error = util.ProcessExecutionError(
+ error = subp.ProcessExecutionError(
stdout=stdout_msg, stderr=stderr_msg, exit_code=3, cmd=cmd)
self.assertEqual(str(error), self.template.format(
description=self.empty_description, stdout=stdout_msg,
@@ -1004,7 +801,7 @@ class TestProcessExecutionError(helpers.TestCase):
# make sure bytes is converted handled properly when formatting
stdout_msg = 'multi\nline\noutput message'.encode()
stderr_msg = 'multi\nline\nerror message\n\n\n'
- error = util.ProcessExecutionError(
+ error = subp.ProcessExecutionError(
stdout=stdout_msg, stderr=stderr_msg)
self.assertEqual(
str(error),
@@ -1170,7 +967,7 @@ class TestGetProcEnv(helpers.TestCase):
self.assertEqual(my_ppid, util.get_proc_ppid(my_pid))
-@mock.patch('cloudinit.util.subp')
+@mock.patch('cloudinit.subp.subp')
def test_find_devs_with_openbsd(m_subp):
m_subp.return_value = (
'cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', ''
@@ -1179,7 +976,7 @@ def test_find_devs_with_openbsd(m_subp):
assert devlist == ['/dev/cd0a', '/dev/sd1i']
-@mock.patch('cloudinit.util.subp')
+@mock.patch('cloudinit.subp.subp')
def test_find_devs_with_openbsd_with_criteria(m_subp):
m_subp.return_value = (
'cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', ''
@@ -1209,7 +1006,7 @@ def test_find_devs_with_freebsd(m_glob):
assert devlist == ['/dev/msdosfs/EFISYS']
-@mock.patch("cloudinit.util.subp")
+@mock.patch("cloudinit.subp.subp")
def test_find_devs_with_netbsd(m_subp):
side_effect_values = [
("ld0 dk0 dk1 cd0", ""),
diff --git a/tests/unittests/test_vmware/test_guestcust_util.py b/tests/unittests/test_vmware/test_guestcust_util.py
index 394bee9f..c8b59d83 100644
--- a/tests/unittests/test_vmware/test_guestcust_util.py
+++ b/tests/unittests/test_vmware/test_guestcust_util.py
@@ -5,7 +5,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import util
+from cloudinit import subp
from cloudinit.sources.helpers.vmware.imc.config import Config
from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
@@ -21,7 +21,7 @@ class TestGuestCustUtil(CiTestCase):
This test is designed to verify the behavior if vmware-toolbox-cmd
is not installed.
"""
- with mock.patch.object(util, 'which', return_value=None):
+ with mock.patch.object(subp, 'which', return_value=None):
self.assertEqual(
get_tools_config('section', 'key', 'defaultVal'), 'defaultVal')
@@ -30,10 +30,10 @@ class TestGuestCustUtil(CiTestCase):
This test is designed to verify the behavior if internal exception
is raised.
"""
- with mock.patch.object(util, 'which', return_value='/dummy/path'):
- with mock.patch.object(util, 'subp',
+ with mock.patch.object(subp, 'which', return_value='/dummy/path'):
+ with mock.patch.object(subp, 'subp',
return_value=('key=value', b''),
- side_effect=util.ProcessExecutionError(
+ side_effect=subp.ProcessExecutionError(
"subp failed", exit_code=99)):
# verify return value is 'defaultVal', not 'value'.
self.assertEqual(
@@ -45,28 +45,28 @@ class TestGuestCustUtil(CiTestCase):
This test is designed to verify the value could be parsed from
key = value of the given [section]
"""
- with mock.patch.object(util, 'which', return_value='/dummy/path'):
+ with mock.patch.object(subp, 'which', return_value='/dummy/path'):
# value is not blank
- with mock.patch.object(util, 'subp',
+ with mock.patch.object(subp, 'subp',
return_value=('key = value ', b'')):
self.assertEqual(
get_tools_config('section', 'key', 'defaultVal'),
'value')
# value is blank
- with mock.patch.object(util, 'subp',
+ with mock.patch.object(subp, 'subp',
return_value=('key = ', b'')):
self.assertEqual(
get_tools_config('section', 'key', 'defaultVal'),
'')
# value contains =
- with mock.patch.object(util, 'subp',
+ with mock.patch.object(subp, 'subp',
return_value=('key=Bar=Wark', b'')):
self.assertEqual(
get_tools_config('section', 'key', 'defaultVal'),
'Bar=Wark')
# value contains specific characters
- with mock.patch.object(util, 'subp',
+ with mock.patch.object(subp, 'subp',
return_value=('[a] b.c_d=e-f', b'')):
self.assertEqual(
get_tools_config('section', 'key', 'defaultVal'),
@@ -87,7 +87,7 @@ class TestGuestCustUtil(CiTestCase):
# post gc status is YES, subp is called to execute command
cf._insertKey("MISC|POST-GC-STATUS", "YES")
conf = Config(cf)
- with mock.patch.object(util, 'subp',
+ with mock.patch.object(subp, 'subp',
return_value=('ok', b'')) as mockobj:
self.assertEqual(
set_gc_status(conf, 'Successful'), ('ok', b''))
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index c3113705..78bce6c9 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -1,8 +1,14 @@
beezly
bipinbachhao
+BirknerAlex
+candlerb
+dermotbradley
dhensby
+landon912
lucasmoura
matthewruffell
nishigori
-tomponline
+onitake
+smoser
TheRealFalcon
+tomponline
diff --git a/tools/read-dependencies b/tools/read-dependencies
index 666e24f5..fd1946a5 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -34,6 +34,13 @@ MAYBE_RELIABLE_YUM_INSTALL = [
'sh', '-c',
"""
error() { echo "$@" 1>&2; }
+ configure_repos_for_proxy_use() {
+ grep -q "^proxy=" /etc/yum.conf || return 0
+ error ":: http proxy in use => forcing the use of fixed URLs in /etc/yum.repos.d/*.repo"
+ sed -i --regexp-extended '/^#baseurl=/s/#// ; /^(mirrorlist|metalink)=/s/^/#/' /etc/yum.repos.d/*.repo
+ sed -i 's/download\.fedoraproject\.org/dl.fedoraproject.org/g' /etc/yum.repos.d/*.repo
+ }
+ configure_repos_for_proxy_use
n=0; max=10;
bcmd="yum install --downloadonly --assumeyes --setopt=keepcache=1"
while n=$(($n+1)); do
@@ -48,6 +55,7 @@ MAYBE_RELIABLE_YUM_INSTALL = [
done
error ":: running yum install --cacheonly --assumeyes $*"
yum install --cacheonly --assumeyes "$@"
+ configure_repos_for_proxy_use
""",
'reliable-yum-install']
diff --git a/tools/run-container b/tools/run-container
index 7212550e..15948e77 100755
--- a/tools/run-container
+++ b/tools/run-container
@@ -351,9 +351,8 @@ wait_for_boot() {
if [ "$OS_NAME" = "centos" ]; then
debug 1 "configuring proxy ${http_proxy}"
inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf"
- inside "$name" sed -i s/enabled=1/enabled=0/ \
- /etc/yum/pluginconf.d/fastestmirror.conf
- inside "$name" sh -c "sed -i '/^#baseurl=/s/#// ; s/^mirrorlist/#mirrorlist/' /etc/yum.repos.d/*.repo"
+ inside "$name" sh -c "sed -i --regexp-extended '/^#baseurl=/s/#// ; /^(mirrorlist|metalink)=/s/^/#/' /etc/yum.repos.d/*.repo"
+ inside "$name" sh -c "sed -i 's/download\.fedoraproject\.org/dl.fedoraproject.org/g' /etc/yum.repos.d/*.repo"
else
debug 1 "do not know how to configure proxy on $OS_NAME"
fi
diff --git a/tox.ini b/tox.ini
index 5ee67368..3fd96702 100644
--- a/tox.ini
+++ b/tox.ini
@@ -23,7 +23,7 @@ setenv =
basepython = python3
deps =
# requirements
- pylint==2.3.1
+ pylint==2.5.3
# test-requirements because unit tests are now present in cloudinit tree
-r{toxinidir}/test-requirements.txt
-r{toxinidir}/integration-requirements.txt
@@ -43,13 +43,10 @@ basepython = python2.7
deps = -r{toxinidir}/test-requirements.txt
[flake8]
-# E121: continuation line under-indented for hanging indent
-# E123: closing bracket does not match indentation of opening bracket’s line
-# E126: continuation line over-indented for hanging indent
# E226: missing whitespace around arithmetic operator
# W503: line break before binary operator
# W504: line break after binary operator
-ignore=E121,E123,E126,E226,W503,W504
+ignore=E226,W503,W504
exclude = .venv,.tox,dist,doc,*egg,.git,build,tools
per-file-ignores =
cloudinit/cmd/main.py:E402
@@ -82,6 +79,7 @@ deps =
# test-requirements
httpretty==0.9.6
mock==1.3.0
+ pytest-catchlog==1.2.1
[testenv:xenial]
# When updating this commands definition, also update the definition in
@@ -135,3 +133,10 @@ commands = {envpython} -m tests.cloud_tests {posargs}
passenv = HOME TRAVIS
deps =
-r{toxinidir}/integration-requirements.txt
+
+[pytest]
+# TODO: s/--strict/--strict-markers/ once xenial support is dropped
+addopts = --strict
+markers =
+ allow_subp_for: allow subp usage for the given commands (disable_subp_usage)
+ allow_all_subp: allow all subp usage (disable_subp_usage)