authorJames Falcon <therealfalcon@gmail.com>2021-03-22 10:52:19 -0500
committerJames Falcon <therealfalcon@gmail.com>2021-03-22 10:52:19 -0500
commitece42b6ef1f32420a3b4c555c507cdf35a14512c (patch)
tree31b8e8c47d3e25585c38ae9e6c1bda806d4319d0
parentfcaec45c9973e2ccc7b2555c397bacc69d19bb4d (diff)
parentbad84ad44254e410e7857edd5de594155d513624 (diff)
merge from upstream/master at 21.1-19-gbad84ad4
-rw-r--r--  .github/PULL_REQUEST_TEMPLATE.md | 24
-rw-r--r--  .github/workflows/stale.yml | 2
-rw-r--r--  .gitignore | 3
-rw-r--r--  .travis.yml | 98
-rw-r--r--  ChangeLog | 109
-rw-r--r--  HACKING.rst | 165
-rw-r--r--  cloud-tests-requirements.txt | 2
-rw-r--r--  cloudinit/apport.py | 1
-rwxr-xr-x  cloudinit/cmd/devel/net_convert.py | 9
-rw-r--r--  cloudinit/cmd/main.py | 1
-rw-r--r--  cloudinit/cmd/tests/test_main.py | 3
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 2
-rw-r--r--  cloudinit/config/cc_ca_certs.py | 123
-rw-r--r--  cloudinit/config/cc_keys_to_console.py | 27
-rw-r--r--  cloudinit/config/cc_resolv_conf.py | 4
-rw-r--r--  cloudinit/config/cc_rh_subscription.py | 8
-rw-r--r--  cloudinit/config/cc_seed_random.py | 12
-rw-r--r--  cloudinit/config/cc_set_hostname.py | 4
-rwxr-xr-x  cloudinit/config/cc_set_passwords.py | 5
-rw-r--r--  cloudinit/config/tests/test_keys_to_console.py | 34
-rw-r--r--  cloudinit/config/tests/test_set_passwords.py | 40
-rwxr-xr-x  cloudinit/distros/__init__.py | 2
-rw-r--r--  cloudinit/distros/arch.py | 31
-rw-r--r--  cloudinit/helpers.py | 7
-rw-r--r--  cloudinit/net/__init__.py | 62
-rw-r--r--  cloudinit/net/eni.py | 2
-rw-r--r--  cloudinit/net/sysconfig.py | 7
-rw-r--r--  cloudinit/net/tests/test_init.py | 119
-rw-r--r--  cloudinit/settings.py | 2
-rwxr-xr-x  cloudinit/sources/DataSourceAzure.py | 195
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 3
-rw-r--r--  cloudinit/sources/DataSourceOVF.py | 164
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 8
-rw-r--r--  cloudinit/sources/DataSourceUpCloud.py | 165
-rw-r--r--  cloudinit/sources/__init__.py | 13
-rw-r--r--  cloudinit/sources/helpers/openstack.py | 5
-rw-r--r--  cloudinit/sources/helpers/tests/test_openstack.py | 5
-rw-r--r--  cloudinit/sources/helpers/upcloud.py | 231
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config.py | 12
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_error.py | 1
-rw-r--r--  cloudinit/sources/tests/test_oracle.py | 4
-rw-r--r--  cloudinit/stages.py | 116
-rw-r--r--  cloudinit/tests/test_util.py | 56
-rw-r--r--  cloudinit/util.py | 38
-rw-r--r--  cloudinit/version.py | 2
-rw-r--r--  doc/examples/cloud-config-ssh-keys.txt | 10
-rw-r--r--  doc/examples/part-handler.txt | 1
-rw-r--r--  doc/man/cloud-init.1 | 2
-rw-r--r--  doc/rtd/index.rst | 1
-rw-r--r--  doc/rtd/topics/availability.rst | 1
-rw-r--r--  doc/rtd/topics/datasources.rst | 1
-rw-r--r--  doc/rtd/topics/datasources/aliyun.rst | 15
-rw-r--r--  doc/rtd/topics/datasources/cloudstack.rst | 2
-rw-r--r--  doc/rtd/topics/datasources/nocloud.rst | 2
-rw-r--r--  doc/rtd/topics/datasources/openstack.rst | 8
-rw-r--r--  doc/rtd/topics/datasources/ovf.rst | 9
-rw-r--r--  doc/rtd/topics/datasources/upcloud.rst | 24
-rw-r--r--  doc/rtd/topics/debugging.rst | 6
-rw-r--r--  doc/rtd/topics/examples.rst | 4
-rw-r--r--  doc/rtd/topics/format.rst | 14
-rw-r--r--  doc/rtd/topics/integration_tests.rst | 32
-rw-r--r--  doc/rtd/topics/network-config-format-v1.rst | 16
-rw-r--r--  doc/rtd/topics/network-config-format-v2.rst | 33
-rw-r--r--  doc/rtd/topics/network-config.rst | 5
-rw-r--r--  doc/rtd/topics/testing.rst | 173
-rw-r--r--  integration-requirements.txt | 2
-rw-r--r--  tests/cloud_tests/testcases/examples/TODO.md | 2
-rw-r--r--  tests/integration_tests/__init__.py | 12
-rw-r--r--  tests/integration_tests/bugs/test_gh570.py | 39
-rw-r--r--  tests/integration_tests/bugs/test_gh626.py | 47
-rw-r--r--  tests/integration_tests/bugs/test_gh632.py | 33
-rw-r--r--  tests/integration_tests/bugs/test_gh668.py | 43
-rw-r--r--  tests/integration_tests/bugs/test_gh671.py | 55
-rw-r--r--  tests/integration_tests/bugs/test_lp1813396.py | 34
-rw-r--r--  tests/integration_tests/bugs/test_lp1835584.py | 104
-rw-r--r--  tests/integration_tests/bugs/test_lp1898997.py | 73
-rw-r--r--  tests/integration_tests/bugs/test_lp1900837.py | 3
-rw-r--r--  tests/integration_tests/bugs/test_lp1901011.py | 58
-rw-r--r--  tests/integration_tests/bugs/test_lp1910835.py | 66
-rw-r--r--  tests/integration_tests/bugs/test_lp1912844.py | 103
-rw-r--r--  tests/integration_tests/clouds.py | 230
-rw-r--r--  tests/integration_tests/conftest.py | 216
-rw-r--r--  tests/integration_tests/instances.py | 116
-rw-r--r--  tests/integration_tests/integration_settings.py | 59
-rw-r--r--  tests/integration_tests/log_utils.py | 11
-rw-r--r--  tests/integration_tests/modules/test_apt.py | 298
-rw-r--r--  tests/integration_tests/modules/test_apt_configure_sources_list.py | 51
-rw-r--r--  tests/integration_tests/modules/test_ca_certs.py | 91
-rw-r--r--  tests/integration_tests/modules/test_cli.py | 45
-rw-r--r--  tests/integration_tests/modules/test_keys_to_console.py | 48
-rw-r--r--  tests/integration_tests/modules/test_lxd_bridge.py | 48
-rw-r--r--  tests/integration_tests/modules/test_package_update_upgrade_install.py | 1
-rw-r--r--  tests/integration_tests/modules/test_power_state_change.py | 90
-rw-r--r--  tests/integration_tests/modules/test_seed_random_data.py | 6
-rw-r--r--  tests/integration_tests/modules/test_set_password.py | 24
-rw-r--r--  tests/integration_tests/modules/test_snap.py | 1
-rw-r--r--  tests/integration_tests/modules/test_ssh_import_id.py | 5
-rw-r--r--  tests/integration_tests/modules/test_ssh_keys_provided.py | 138
-rw-r--r--  tests/integration_tests/modules/test_users_groups.py | 46
-rw-r--r--  tests/integration_tests/test_logging.py | 22
-rw-r--r--  tests/integration_tests/test_upgrade.py | 98
-rw-r--r--  tests/unittests/test_data.py | 37
-rw-r--r--  tests/unittests/test_datasource/test_aliyun.py | 30
-rw-r--r--  tests/unittests/test_datasource/test_azure.py | 177
-rw-r--r--  tests/unittests/test_datasource/test_common.py | 3
-rw-r--r--  tests/unittests/test_datasource/test_configdrive.py | 8
-rw-r--r--  tests/unittests/test_datasource/test_openstack.py | 32
-rw-r--r--  tests/unittests/test_datasource/test_ovf.py | 291
-rw-r--r--  tests/unittests/test_datasource/test_upcloud.py | 314
-rw-r--r--  tests/unittests/test_distros/test_generic.py | 13
-rw-r--r--  tests/unittests/test_handler/test_handler_ca_certs.py | 298
-rw-r--r--  tests/unittests/test_handler/test_handler_locale.py | 23
-rw-r--r--  tests/unittests/test_net.py | 30
-rw-r--r--  tests/unittests/test_util.py | 4
-rw-r--r--  tests/unittests/test_vmware_config_file.py | 16
-rw-r--r--  tools/.github-cla-signers | 15
-rwxr-xr-x  tools/ds-identify | 7
-rw-r--r--  tox.ini | 22
118 files changed, 5043 insertions(+), 880 deletions(-)
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 2b59d10a..0aa97dd4 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,17 +1,19 @@
## Proposed Commit Message
<!-- Include a proposed commit message because all PRs are squash merged -->
-> summary: no more than 70 characters
->
-> A description of what the change being made is and why it is being
-> made, if the summary line is insufficient. The blank line above is
-> required. This should be wrapped at 72 characters, but otherwise has
-> no particular length requirements.
->
-> If you need to write multiple paragraphs, feel free.
->
-> LP: #NNNNNNN (replace with the appropriate bug reference or remove
-> this line entirely if there is no associated bug)
+```
+summary: no more than 70 characters
+
+A description of what the change being made is and why it is being
+made, if the summary line is insufficient. The blank line above is
+required. This should be wrapped at 72 characters, but otherwise has
+no particular length requirements.
+
+If you need to write multiple paragraphs, feel free.
+
+LP: #NNNNNNN (replace with the appropriate bug reference or remove
+this line entirely if there is no associated bug)
+```
## Additional Context
<!-- If relevant -->
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 20c5735d..3b71ba28 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -20,5 +20,5 @@ jobs:
If you are waiting for code review and you are seeing this message, apologies! Please reply, tagging mitechie, and he will ensure that someone takes a look soon.
- (If the pull request is closed, please do feel free to reopen it if you wish to continue working on it.)
+ (If the pull request is closed and you would like to continue working on it, please do tag mitechie to reopen it.)
stale-pr-label: 'stale-pr'
diff --git a/.gitignore b/.gitignore
index 5a68bff9..eb26e0da 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,6 +16,9 @@ stage
.pc/
.cache/
.mypy_cache/
+.pytest_cache/
+.vscode/
+htmlcov/
# Ignore packaging artifacts
cloud-init.dsc
diff --git a/.travis.yml b/.travis.yml
index 2fad49f3..690ab644 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -33,96 +33,17 @@ install:
script:
- tox
+env:
+ TOXENV=py3
+ PYTEST_ADDOPTS=-v # List all tests run by pytest
+
matrix:
fast_finish: true
- allow_failures:
- - name: "Integration Tests (WIP)"
include:
- python: 3.6
- env:
- TOXENV=py3
- PYTEST_ADDOPTS=-v # List all tests run by pytest
- - if: NOT branch =~ /^ubuntu\//
- cache:
- - directories:
- - lxd_images
- - chroots
- before_cache:
- - |
- # Find the most recent image file
- latest_file="$(sudo ls -Art /var/snap/lxd/common/lxd/images/ | tail -n 1)"
- # This might be <hash>.rootfs or <hash>, normalise
- latest_file="$(basename $latest_file .rootfs)"
- # Find all files with that prefix and copy them to our cache dir
- sudo find /var/snap/lxd/common/lxd/images/ -name $latest_file* -print -exec cp {} "$TRAVIS_BUILD_DIR/lxd_images/" \;
- install:
- - git fetch --unshallow
- - sudo apt-get install -y --install-recommends sbuild ubuntu-dev-tools fakeroot tox debhelper
- - pip install .
- - pip install tox
- # bionic has lxd from deb installed, remove it first to ensure
- # pylxd talks only to the lxd from snap
- - sudo apt remove --purge lxd lxd-client
- - sudo rm -Rf /var/lib/lxd
- - sudo snap install lxd
- - sudo lxd init --auto
- - sudo mkdir --mode=1777 -p /var/snap/lxd/common/consoles
- # Move any cached lxd images into lxd's image dir
- - sudo find "$TRAVIS_BUILD_DIR/lxd_images/" -type f -print -exec mv {} /var/snap/lxd/common/lxd/images/ \;
- - sudo usermod -a -G lxd $USER
- - sudo sbuild-adduser $USER
- - cp /usr/share/doc/sbuild/examples/example.sbuildrc /home/$USER/.sbuildrc
- script:
- # Ubuntu LTS: Build
- - ./packages/bddeb -S -d --release xenial
- - |
- needs_caching=false
- if [ -e "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" ]; then
- # If we have a cached chroot, move it into place
- sudo mkdir -p /var/lib/schroot/chroots/xenial-amd64
- sudo tar --sparse --xattrs --preserve-permissions --numeric-owner -xf "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" -C /var/lib/schroot/chroots/xenial-amd64
- # Write its configuration
- cat > sbuild-xenial-amd64 << EOM
- [xenial-amd64]
- description=xenial-amd64
- groups=sbuild,root,admin
- root-groups=sbuild,root,admin
- # Uncomment these lines to allow members of these groups to access
- # the -source chroots directly (useful for automated updates, etc).
- #source-root-users=sbuild,root,admin
- #source-root-groups=sbuild,root,admin
- type=directory
- profile=sbuild
- union-type=overlay
- directory=/var/lib/schroot/chroots/xenial-amd64
- EOM
- sudo mv sbuild-xenial-amd64 /etc/schroot/chroot.d/
- sudo chown root /etc/schroot/chroot.d/sbuild-xenial-amd64
- # And ensure it's up-to-date.
- before_pkgs="$(sudo schroot -c source:xenial-amd64 -d / dpkg -l | sha256sum)"
- sudo schroot -c source:xenial-amd64 -d / -- sh -c "apt-get update && apt-get -qqy upgrade"
- after_pkgs=$(sudo schroot -c source:xenial-amd64 -d / dpkg -l | sha256sum)
- if [ "$before_pkgs" != "$after_pkgs" ]; then
- needs_caching=true
- fi
- else
- # Otherwise, create the chroot
- sudo -E su $USER -c 'mk-sbuild xenial'
- needs_caching=true
- fi
- # If there are changes to the schroot (or it's entirely new),
- # tar up the schroot (to preserve ownership/permissions) and
- # move it into the cached dir; no need to compress it because
- # Travis will do that anyway
- if [ "$needs_caching" = "true" ]; then
- sudo tar --sparse --xattrs --xattrs-include=* -cf "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" -C /var/lib/schroot/chroots/xenial-amd64 .
- fi
- # Use sudo to get a new shell where we're in the sbuild group
- - sudo -E su $USER -c 'sbuild --nolog --no-run-lintian --verbose --dist=xenial cloud-init_*.dsc'
- # Ubuntu LTS: Integration
- - sg lxd -c 'tox -e citest -- run --verbose --preserve-data --data-dir results --os-name xenial --test modules/apt_configure_sources_list.yaml --test modules/ntp_servers --test modules/set_password_list --test modules/user_groups --deb cloud-init_*_all.deb'
- - name: "Integration Tests (WIP)"
+ - name: "Integration Tests"
if: NOT branch =~ /^ubuntu\//
+ env: {}
cache:
- directories:
- lxd_images
@@ -199,6 +120,7 @@ matrix:
fi
# Use sudo to get a new shell where we're in the sbuild group
- sudo -E su $USER -c 'sbuild --nolog --no-run-lintian --verbose --dist=xenial cloud-init_*.dsc'
+ - ssh-keygen -P "" -q -f ~/.ssh/id_rsa
- sg lxd -c 'CLOUD_INIT_CLOUD_INIT_SOURCE="$(ls *.deb)" tox -e integration-tests-ci' &
- |
SECONDS=0
@@ -220,3 +142,9 @@ matrix:
env: TOXENV=pylint
- python: 3.6
env: TOXENV=doc
+ # Test all supported Python versions (but at the end, so we schedule
+ # longer-running jobs first)
+ - python: 3.9
+ - python: 3.8
+ - python: 3.7
+ - python: 3.5
diff --git a/ChangeLog b/ChangeLog
index 9b41924a..44b50410 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,110 @@
+21.1
+ - Azure: Support for VMs without ephemeral resource disks. (#800)
+ [Johnson Shi] (LP: #1901011)
+ - cc_keys_to_console: add option to disable key emission (#811)
+ [Michael Hudson-Doyle] (LP: #1915460)
+ - integration_tests: introduce lxd_use_exec mark (#802)
+ - azure: case-insensitive UUID to avoid new IID during kernel upgrade
+ (#798) (LP: #1835584)
+ - stale.yml: don't ask submitters to reopen PRs (#816)
+ - integration_tests: fix use of SSH agent within tox (#815)
+ - integration_tests: add UPGRADE CloudInitSource (#812)
+ - integration_tests: use unique MAC addresses for tests (#813)
+ - Update .gitignore (#814)
+ - Port apt cloud_tests to integration tests (#808)
+ - integration_tests: fix test_gh626 on LXD VMs (#809)
+ - Fix attempting to decode binary data in test_seed_random_data test (#806)
+ - Remove wait argument from tests with session_cloud calls (#805)
+ - Datasource for UpCloud (#743) [Antti Myyrä]
+ - test_gh668: fix failure on LXD VMs (#801)
+ - openstack: read the dynamic metadata group vendor_data2.json (#777)
+ [Andrew Bogott] (LP: #1841104)
+ - includedir in sudoers can be prefixed by "arroba" (#783)
+ [Jordi Massaguer Pla]
+ - [VMware] change default max wait time to 15s (#774) [xiaofengw-vmware]
+ - Revert integration test associated with reverted #586 (#784)
+ - Add jordimassaguerpla as contributor (#787) [Jordi Massaguer Pla]
+ - Add Rick Harding to CLA signers (#792) [Rick Harding]
+ - HACKING.rst: add clarifying note to LP CLA process section (#789)
+ - Stop linting cloud_tests (#791)
+ - cloud-tests: update cryptography requirement (#790) [Joshua Powers]
+ - Remove 'remove-raise-on-failure' calls from integration_tests (#788)
+ - Use more cloud defaults in integration tests (#757)
+ - Adding self to cla signers (#776) [Andrew Bogott]
+ - doc: avoid two warnings (#781) [Dan Kenigsberg]
+ - Use proper spelling for Red Hat (#778) [Dan Kenigsberg]
+ - Add antonyc to .github-cla-signers (#747) [Anton Chaporgin]
+ - integration_tests: log image serial if available (#772)
+ - [VMware] Support cloudinit raw data feature (#691) [xiaofengw-vmware]
+ - net: Fix static routes to host in eni renderer (#668) [Pavel Abalikhin]
+ - .travis.yml: don't run cloud_tests in CI (#756)
+ - test_upgrade: add some missing commas (#769)
+ - cc_seed_random: update documentation and fix integration test (#771)
+ (LP: #1911227)
+ - Fix test gh-632 test to only run on NoCloud (#770) (LP: #1911230)
+ - archlinux: fix package upgrade command handling (#768) [Bao Trinh]
+ - integration_tests: add integration test for LP: #1910835 (#761)
+ - Fix regression with handling of IMDS ssh keys (#760) [Thomas Stringer]
+ - integration_tests: log cloud-init version in SUT (#758)
+ - Add ajmyyra as contributor (#742) [Antti Myyrä]
+ - net_convert: add some missing help text (#755)
+ - Missing IPV6_AUTOCONF=no to render sysconfig dhcp6 stateful on RHEL
+ (#753) [Eduardo Otubo]
+ - doc: document missing IPv6 subnet types (#744) [Antti Myyrä]
+ - Add example configuration for datasource `AliYun` (#751) [Xiaoyu Zhong]
+ - integration_tests: add SSH key selection settings (#754)
+ - fix a typo in man page cloud-init.1 (#752) [Amy Chen]
+ - network-config-format-v2.rst: add Netplan Passthrough section (#750)
+ - stale: re-enable post holidays (#749)
+ - integration_tests: port ca_certs tests from cloud_tests (#732)
+ - Azure: Add telemetry for poll IMDS (#741) [Johnson Shi]
+ - doc: move testing section from HACKING to its own doc (#739)
+ - No longer allow integration test failures on travis (#738)
+ - stale: fix error in definition (#740)
+ - integration_tests: set log-cli-level to INFO by default (#737)
+ - PULL_REQUEST_TEMPLATE.md: use backticks around commit message (#736)
+ - stale: disable check for holiday break (#735)
+ - integration_tests: log the path we collect logs into (#733)
+ - .travis.yml: add (most) supported Python versions to CI (#734)
+ - integration_tests: fix IN_PLACE CLOUD_INIT_SOURCE (#731)
+ - cc_ca_certs: add RHEL support (#633) [cawamata]
+ - Azure: only generate config for NICs with addresses (#709)
+ [Thomas Stringer]
+ - doc: fix CloudStack configuration example (#707) [Olivier Lemasle]
+ - integration_tests: restrict test_lxd_bridge appropriately (#730)
+ - Add integration tests for CLI functionality (#729)
+ - Integration test for gh-626 (#728)
+ - Some test_upgrade fixes (#726)
+ - Ensure overriding test vars with env vars works for booleans (#727)
+ - integration_tests: port lxd_bridge test from cloud_tests (#718)
+ - Integration test for gh-632. (#725)
+ - Integration test for gh-671 (#724)
+ - integration-requirements.txt: bump pycloudlib commit (#723)
+ - Drop unnecessary shebang from cmd/main.py (#722) [Eduardo Otubo]
+ - Integration test for LP: #1813396 and #669 (#719)
+ - integration_tests: include timestamp in log output (#720)
+ - integration_tests: add test for LP: #1898997 (#713)
+ - Add integration test for power_state_change module (#717)
+ - Update documentation for network-config-format-v2 (#701) [ggiesen]
+ - sandbox CA Cert tests to not require ca-certificates (#715)
+ [Eduardo Otubo]
+ - Add upgrade integration test (#693)
+ - Integration test for 570 (#712)
+ - Add ability to keep snapshotted images in integration tests (#711)
+ - Integration test for pull #586 (#706)
+ - integration_tests: introduce skipping of tests by OS (#702)
+ - integration_tests: introduce IntegrationInstance.restart (#708)
+ - Add lxd-vm to list of valid integration test platforms (#705)
+ - Adding BOOTPROTO = dhcp to render sysconfig dhcp6 stateful on RHEL
+ (#685) [Eduardo Otubo]
+ - Delete image snapshots created for integration tests (#682)
+ - Parametrize ssh_keys_provided integration test (#700) [lucasmoura]
+ - Drop use_sudo attribute on IntegrationInstance (#694) [lucasmoura]
+ - cc_apt_configure: add riscv64 as a ports arch (#687)
+ [Dimitri John Ledkov]
+ - cla: add xnox (#692) [Dimitri John Ledkov]
+ - Collect logs from integration test runs (#675)
+
20.4.1
- Revert "ssh_util: handle non-default AuthorizedKeysFile config (#586)"
@@ -531,7 +638,7 @@
- docs: add additional details to per-instance/once [Joshua Powers]
- Update doc-requirements.txt [Joshua Powers]
- doc-requirements: add missing dep [Joshua Powers]
- - dhcp: Support RedHat dhcp rfc3442 lease format for option 121 (#76)
+ - dhcp: Support Red Hat dhcp rfc3442 lease format for option 121 (#76)
[Eric Lafontaine] (LP: #1850642)
- network_state: handle empty v1 config (#45) (LP: #1852496)
- docs: Add document on how to report bugs [Joshua Powers]
diff --git a/HACKING.rst b/HACKING.rst
index 8a12e3e3..623b3136 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -98,6 +98,11 @@ The cloud-init team will review the two merge proposals and verify that
the CLA has been signed for the Launchpad user and record the
associated GitHub account.
+.. note::
+ If you are a first-time contributor, you will not need to touch
+ Launchpad to contribute to cloud-init: all new CLA signatures are
+ handled as part of the GitHub pull request process described above.
+
Do these things for each feature or bug
=======================================
@@ -173,154 +178,11 @@ Cloud Config Modules
* Any new modules should use underscores in any new config options and not
hyphens (e.g. `new_option` and *not* `new-option`).
-.. _unit_testing:
-
-Testing
-------------
-
-cloud-init has both unit tests and integration tests. Unit tests can
-be found in-tree alongside the source code, as well as
-at ``tests/unittests``. Integration tests can be found at
-``tests/integration_tests``. Documentation specifically for integration
-tests can be found on the :ref:`integration_tests` page, but
-the guidelines specified below apply to both types of tests.
-
-cloud-init uses `pytest`_ to run its tests, and has tests written both
-as ``unittest.TestCase`` sub-classes and as un-subclassed pytest tests.
-The following guidelines should be followed:
-
-* For ease of organisation and greater accessibility for developers not
- familiar with pytest, all cloud-init unit tests must be contained
- within test classes
-
- * Put another way, module-level test functions should not be used
-
-* pytest test classes should use `pytest fixtures`_ to share
- functionality instead of inheritance
-
-* As all tests are contained within classes, it is acceptable to mix
- ``TestCase`` test classes and pytest test classes within the same
- test file
-
- * These can be easily distinguished by their definition: pytest
- classes will not use inheritance at all (e.g.
- `TestGetPackageMirrorInfo`_), whereas ``TestCase`` classes will
- subclass (indirectly) from ``TestCase`` (e.g.
- `TestPrependBaseCommands`_)
-
-* pytest tests should use bare ``assert`` statements, to take advantage
- of pytest's `assertion introspection`_
-
- * For ``==`` and other commutative assertions, the expected value
- should be placed before the value under test:
- ``assert expected_value == function_under_test()``
-
-* As we still support Ubuntu 16.04 (Xenial Xerus), we can only use
- pytest features that are available in v2.8.7. This is an
- inexhaustive list of ways in which this may catch you out:
-
- * Support for using ``yield`` in ``pytest.fixture`` functions was
- only introduced in `pytest 3.0`_. Such functions must instead use
- the ``pytest.yield_fixture`` decorator.
-
- * Only the following built-in fixtures are available
- [#fixture-list]_:
-
- * ``cache``
- * ``capfd``
- * ``caplog`` (provided by ``python3-pytest-catchlog`` on xenial)
- * ``capsys``
- * ``monkeypatch``
- * ``pytestconfig``
- * ``record_xml_property``
- * ``recwarn``
- * ``tmpdir_factory``
- * ``tmpdir``
-
- * On xenial, the objects returned by the ``tmpdir`` fixture cannot be
- used where paths are required; they are rejected as invalid paths.
- You must instead use their ``.strpath`` attribute.
-
- * For example, instead of
- ``util.write_file(tmpdir.join("some_file"), ...)``, you should
- write ``util.write_file(tmpdir.join("some_file").strpath, ...)``.
-
- * The `pytest.param`_ function cannot be used. It was introduced in
- pytest 3.1, which means it is not available on xenial. The more
- limited mechanism it replaced was removed in pytest 4.0, so is not
- available in focal or later. The only available alternatives are
- to write mark-requiring test instances as completely separate
- tests, without utilising parameterisation, or to apply the mark to
- the entire parameterized test (and therefore every test instance).
-
-* Variables/parameter names for ``Mock`` or ``MagicMock`` instances
- should start with ``m_`` to clearly distinguish them from non-mock
- variables
-
- * For example, ``m_readurl`` (which would be a mock for ``readurl``)
-
-* The ``assert_*`` methods that are available on ``Mock`` and
- ``MagicMock`` objects should be avoided, as typos in these method
- names may not raise ``AttributeError`` (and so can cause tests to
- silently pass). An important exception: if a ``Mock`` is
- `autospecced`_ then misspelled assertion methods *will* raise an
- ``AttributeError``, so these assertion methods may be used on
- autospecced ``Mock`` objects.
-
- For non-autospecced ``Mock`` s, these substitutions can be used
- (``m`` is assumed to be a ``Mock``):
-
- * ``m.assert_any_call(*args, **kwargs)`` => ``assert
- mock.call(*args, **kwargs) in m.call_args_list``
- * ``m.assert_called()`` => ``assert 0 != m.call_count``
- * ``m.assert_called_once()`` => ``assert 1 == m.call_count``
- * ``m.assert_called_once_with(*args, **kwargs)`` => ``assert
- [mock.call(*args, **kwargs)] == m.call_args_list``
- * ``m.assert_called_with(*args, **kwargs)`` => ``assert
- mock.call(*args, **kwargs) == m.call_args_list[-1]``
- * ``m.assert_has_calls(call_list, any_order=True)`` => ``for call in
- call_list: assert call in m.call_args_list``
-
- * ``m.assert_has_calls(...)`` and ``m.assert_has_calls(...,
- any_order=False)`` are not easily replicated in a single
- statement, so their use when appropriate is acceptable.
-
- * ``m.assert_not_called()`` => ``assert 0 == m.call_count``
-
-* Test arguments should be ordered as follows:
-
- * ``mock.patch`` arguments. When used as a decorator, ``mock.patch``
- partially applies its generated ``Mock`` object as the first
- argument, so these arguments must go first.
- * ``pytest.mark.parametrize`` arguments, in the order specified to
- the ``parametrize`` decorator. These arguments are also provided
- by a decorator, so it's natural that they sit next to the
- ``mock.patch`` arguments.
- * Fixture arguments, alphabetically. These are not provided by a
- decorator, so they are last, and their order has no defined
- meaning, so we default to alphabetical.
-
-* It follows from this ordering of test arguments (so that we retain
- the property that arguments left-to-right correspond to decorators
- bottom-to-top) that test decorators should be ordered as follows:
-
- * ``pytest.mark.parametrize``
- * ``mock.patch``
-
-* When there are multiple patch calls in a test file for the module it
- is testing, it may be desirable to capture the shared string prefix
- for these patch calls in a module-level variable. If used, such
- variables should be named ``M_PATH`` or, for datasource tests,
- ``DS_PATH``.
-
-.. _pytest: https://docs.pytest.org/
-.. _pytest fixtures: https://docs.pytest.org/en/latest/fixture.html
-.. _TestGetPackageMirrorInfo: https://github.com/canonical/cloud-init/blob/42f69f410ab8850c02b1f53dd67c132aa8ef64f5/cloudinit/distros/tests/test_init.py\#L15
-.. _TestPrependBaseCommands: https://github.com/canonical/cloud-init/blob/master/cloudinit/tests/test_subp.py#L9
-.. _assertion introspection: https://docs.pytest.org/en/latest/assert.html
-.. _pytest 3.0: https://docs.pytest.org/en/latest/changelog.html#id1093
-.. _pytest.param: https://docs.pytest.org/en/latest/reference.html#pytest-param
-.. _autospecced: https://docs.python.org/3.8/library/unittest.mock.html#autospeccing
+Tests
+-----
+
+Submissions to cloud-init must include testing. See :ref:`testing` for
+details on these requirements.
Type Annotations
----------------
@@ -344,13 +206,6 @@ variable annotations specified in `PEP-526`_ were introduced in Python
.. _PEP-484: https://www.python.org/dev/peps/pep-0484/
.. _PEP-526: https://www.python.org/dev/peps/pep-0526/
-.. [#fixture-list] This list of fixtures (with markup) can be
- reproduced by running::
-
- py.test-3 --fixtures -q | grep "^[^ -]" | grep -v '\(no\|capturelog\)' | sort | sed 's/.*/* ``\0``/'
-
- in a xenial lxd container with python3-pytest-catchlog installed.
-
Feature Flags
-------------
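The mock-assertion hazard described in the guidance relocated above is easy to demonstrate. A minimal sketch using plain ``unittest.mock`` (nothing cloud-init specific is assumed):

```python
from unittest import mock

m = mock.Mock()
m("x")                    # the code under test calls the mock once
m.called_once_with("y")   # typo (missing assert_ prefix): this just creates
                          # and calls a child Mock, raising nothing, so a
                          # test containing it would silently pass
# The explicit substitution recommended above catches the mismatch:
assert [mock.call("x")] == m.call_args_list
```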
diff --git a/cloud-tests-requirements.txt b/cloud-tests-requirements.txt
index b4cd18d5..eecab63e 100644
--- a/cloud-tests-requirements.txt
+++ b/cloud-tests-requirements.txt
@@ -10,7 +10,7 @@ boto3==1.14.53
# ssh communication
paramiko==2.7.2
-cryptography==3.1
+cryptography==3.2
# lxd backend
pylxd==2.2.11
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index 9bded16c..25f254e3 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -39,6 +39,7 @@ KNOWN_CLOUD_NAMES = [
'SAP Converged Cloud',
'Scaleway',
'SmartOS',
+ 'UpCloud',
'VMware',
'ZStack',
'Other'
diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py
index 80d217ca..0668ffa3 100755
--- a/cloudinit/cmd/devel/net_convert.py
+++ b/cloudinit/cmd/devel/net_convert.py
@@ -28,11 +28,13 @@ def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
parser.add_argument("-p", "--network-data", type=open,
- metavar="PATH", required=True)
+ metavar="PATH", required=True,
+ help="The network configuration to read")
parser.add_argument("-k", "--kind",
choices=['eni', 'network_data.json', 'yaml',
'azure-imds', 'vmware-imc'],
- required=True)
+ required=True,
+ help="The format of the given network config")
parser.add_argument("-d", "--directory",
metavar="PATH",
help="directory to place output in",
@@ -50,7 +52,8 @@ def get_parser(parser=None):
help='enable debug logging to stderr.')
parser.add_argument("-O", "--output-kind",
choices=['eni', 'netplan', 'sysconfig'],
- required=True)
+ required=True,
+ help="The network config format to emit")
return parser
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index a5446da7..baf1381f 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
index 585b3b0e..78b27441 100644
--- a/cloudinit/cmd/tests/test_main.py
+++ b/cloudinit/cmd/tests/test_main.py
@@ -127,7 +127,8 @@ class TestMain(FilesystemMockingTestCase):
'syslog_fix_perms': [
'syslog:adm', 'root:adm', 'root:wheel', 'root:root'
],
- 'vendor_data': {'enabled': True, 'prefix': []}})
+ 'vendor_data': {'enabled': True, 'prefix': []},
+ 'vendor_data2': {'enabled': True, 'prefix': []}})
updated_cfg.pop('system_info')
self.assertEqual(updated_cfg, cfg)
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 73d8719f..bb8a1278 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -389,7 +389,7 @@ PRIMARY_ARCH_MIRRORS = {"PRIMARY": "http://archive.ubuntu.com/ubuntu/",
PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
"SECURITY": "http://ports.ubuntu.com/ubuntu-ports"}
PRIMARY_ARCHES = ['amd64', 'i386']
-PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el']
+PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el', 'riscv64']
def get_default_mirrors(arch=None, target=None):
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index 3c453d91..bd7bead9 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -25,7 +25,7 @@ can be removed from the system with the configuration option
**Module frequency:** per instance
-**Supported distros:** alpine, debian, ubuntu
+**Supported distros:** alpine, debian, ubuntu, rhel
**Config keys**::
@@ -44,60 +44,104 @@ import os
from cloudinit import subp
from cloudinit import util
-CA_CERT_PATH = "/usr/share/ca-certificates/"
-CA_CERT_FILENAME = "cloud-init-ca-certs.crt"
-CA_CERT_CONFIG = "/etc/ca-certificates.conf"
-CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/"
-CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
+DEFAULT_CONFIG = {
+ 'ca_cert_path': '/usr/share/ca-certificates/',
+ 'ca_cert_filename': 'cloud-init-ca-certs.crt',
+ 'ca_cert_config': '/etc/ca-certificates.conf',
+ 'ca_cert_system_path': '/etc/ssl/certs/',
+ 'ca_cert_update_cmd': ['update-ca-certificates']
+}
+DISTRO_OVERRIDES = {
+ 'rhel': {
+ 'ca_cert_path': '/usr/share/pki/ca-trust-source/',
+ 'ca_cert_filename': 'anchors/cloud-init-ca-certs.crt',
+ 'ca_cert_config': None,
+ 'ca_cert_system_path': '/etc/pki/ca-trust/',
+ 'ca_cert_update_cmd': ['update-ca-trust']
+ }
+}
-distros = ['alpine', 'debian', 'ubuntu']
+distros = ['alpine', 'debian', 'ubuntu', 'rhel']
-def update_ca_certs():
+
+def _distro_ca_certs_configs(distro_name):
+ """Return a distro-specific ca_certs config dictionary
+
+ @param distro_name: String providing the distro class name.
+ @returns: Dict of distro configurations for ca-cert.
+ """
+ cfg = DISTRO_OVERRIDES.get(distro_name, DEFAULT_CONFIG)
+ cfg['ca_cert_full_path'] = os.path.join(cfg['ca_cert_path'],
+ cfg['ca_cert_filename'])
+ return cfg
+
+
+def update_ca_certs(distro_cfg):
"""
Updates the CA certificate cache on the current machine.
+
+ @param distro_cfg: A hash returned by _distro_ca_certs_configs().
"""
- subp.subp(["update-ca-certificates"], capture=False)
+ subp.subp(distro_cfg['ca_cert_update_cmd'], capture=False)
-def add_ca_certs(certs):
+def add_ca_certs(distro_cfg, certs):
"""
Adds certificates to the system. To actually apply the new certificates
you must also call L{update_ca_certs}.
+ @param distro_cfg: A hash returned by _distro_ca_certs_configs().
@param certs: A list of certificate strings.
"""
- if certs:
- # First ensure they are strings...
- cert_file_contents = "\n".join([str(c) for c in certs])
- util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0o644)
-
- if os.stat(CA_CERT_CONFIG).st_size == 0:
- # If the CA_CERT_CONFIG file is empty (i.e. all existing
- # CA certs have been deleted) then simply output a single
- # line with the cloud-init cert filename.
- out = "%s\n" % CA_CERT_FILENAME
- else:
- # Append cert filename to CA_CERT_CONFIG file.
- # We have to strip the content because blank lines in the file
- # causes subsequent entries to be ignored. (LP: #1077020)
- orig = util.load_file(CA_CERT_CONFIG)
- cur_cont = '\n'.join([line for line in orig.splitlines()
- if line != CA_CERT_FILENAME])
- out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME)
- util.write_file(CA_CERT_CONFIG, out, omode="wb")
-
-
-def remove_default_ca_certs(distro_name):
+ if not certs:
+ return
+ # First ensure they are strings...
+ cert_file_contents = "\n".join([str(c) for c in certs])
+ util.write_file(distro_cfg['ca_cert_full_path'],
+ cert_file_contents,
+ mode=0o644)
+ update_cert_config(distro_cfg)
+
+
+def update_cert_config(distro_cfg):
+ """
+ Update certificate config file to add the file path managed by cloud-init
+
+ @param distro_cfg: A hash returned by _distro_ca_certs_configs().
+ """
+ if distro_cfg['ca_cert_config'] is None:
+ return
+ if os.stat(distro_cfg['ca_cert_config']).st_size == 0:
+ # If the CA_CERT_CONFIG file is empty (i.e. all existing
+ # CA certs have been deleted) then simply output a single
+ # line with the cloud-init cert filename.
+ out = "%s\n" % distro_cfg['ca_cert_filename']
+ else:
+ # Append cert filename to CA_CERT_CONFIG file.
+ # We have to strip the content because blank lines in the file
+ # cause subsequent entries to be ignored. (LP: #1077020)
+ orig = util.load_file(distro_cfg['ca_cert_config'])
+ cr_cont = '\n'.join([line for line in orig.splitlines()
+ if line != distro_cfg['ca_cert_filename']])
+ out = "%s\n%s\n" % (cr_cont.rstrip(),
+ distro_cfg['ca_cert_filename'])
+ util.write_file(distro_cfg['ca_cert_config'], out, omode="wb")
+
+
+def remove_default_ca_certs(distro_name, distro_cfg):
"""
Removes all default trusted CA certificates from the system. To actually
apply the change you must also call L{update_ca_certs}.
+
+ @param distro_name: String providing the distro class name.
+ @param distro_cfg: A hash returned by _distro_ca_certs_configs().
"""
- util.delete_dir_contents(CA_CERT_PATH)
- util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
- util.write_file(CA_CERT_CONFIG, "", mode=0o644)
+ util.delete_dir_contents(distro_cfg['ca_cert_path'])
+ util.delete_dir_contents(distro_cfg['ca_cert_system_path'])
+ util.write_file(distro_cfg['ca_cert_config'], "", mode=0o644)
- if distro_name != 'alpine':
+ if distro_name in ['debian', 'ubuntu']:
debconf_sel = (
"ca-certificates ca-certificates/trust_new_crts " + "select no")
subp.subp(('debconf-set-selections', '-'), debconf_sel)
@@ -120,22 +164,23 @@ def handle(name, cfg, cloud, log, _args):
return
ca_cert_cfg = cfg['ca-certs']
+ distro_cfg = _distro_ca_certs_configs(cloud.distro.name)
# If there is a remove-defaults option set to true, remove the system
# default trusted CA certs first.
if ca_cert_cfg.get("remove-defaults", False):
log.debug("Removing default certificates")
- remove_default_ca_certs(cloud.distro.name)
+ remove_default_ca_certs(cloud.distro.name, distro_cfg)
# If we are given any new trusted CA certs to add, add them.
if "trusted" in ca_cert_cfg:
trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted")
if trusted_certs:
log.debug("Adding %d certificates" % len(trusted_certs))
- add_ca_certs(trusted_certs)
+ add_ca_certs(distro_cfg, trusted_certs)
# Update the system with the new cert configuration.
log.debug("Updating certificates")
- update_ca_certs()
+ update_ca_certs(distro_cfg)
# vi: ts=4 expandtab
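The distro-override lookup introduced above boils down to a dictionary lookup plus a computed full path. A trimmed, standalone sketch (only two config keys kept; the ``dict()`` copy is used here so repeated calls never mutate the module-level defaults):

```python
import os

DEFAULT_CONFIG = {
    'ca_cert_path': '/usr/share/ca-certificates/',
    'ca_cert_filename': 'cloud-init-ca-certs.crt',
}
DISTRO_OVERRIDES = {
    'rhel': {
        'ca_cert_path': '/usr/share/pki/ca-trust-source/',
        'ca_cert_filename': 'anchors/cloud-init-ca-certs.crt',
    },
}

def distro_ca_certs_configs(distro_name):
    # Copy so the shared module-level dicts are left untouched.
    cfg = dict(DISTRO_OVERRIDES.get(distro_name, DEFAULT_CONFIG))
    cfg['ca_cert_full_path'] = os.path.join(
        cfg['ca_cert_path'], cfg['ca_cert_filename'])
    return cfg

assert distro_ca_certs_configs('rhel')['ca_cert_full_path'] == (
    '/usr/share/pki/ca-trust-source/anchors/cloud-init-ca-certs.crt')
assert distro_ca_certs_configs('debian')['ca_cert_full_path'] == (
    '/usr/share/ca-certificates/cloud-init-ca-certs.crt')
```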
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index 0f2be52b..d72b5244 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -9,14 +9,17 @@
"""
Keys to Console
---------------
-**Summary:** control which SSH keys may be written to console
-
-For security reasons it may be desirable not to write SSH fingerprints and keys
-to the console. To avoid the fingerprint of types of SSH keys being written to
-console the ``ssh_fp_console_blacklist`` config key can be used. By default all
-types of keys will have their fingerprints written to console. To avoid keys
-of a key type being written to console the ``ssh_key_console_blacklist`` config
-key can be used. By default ``ssh-dss`` keys are not written to console.
+**Summary:** control which SSH host keys may be written to console
+
+For security reasons it may be desirable not to write SSH host keys and their
+fingerprints to the console. To avoid either being written to the console the
+``emit_keys_to_console`` config key under the main ``ssh`` config key can be
+used. To avoid the fingerprint of types of SSH host keys being written to
+console the ``ssh_fp_console_blacklist`` config key can be used. By default
+all types of keys will have their fingerprints written to console. To avoid
+host keys of a key type being written to console the
+``ssh_key_console_blacklist`` config key can be used. By default ``ssh-dss``
+host keys are not written to console.
**Internal name:** ``cc_keys_to_console``
@@ -26,6 +29,9 @@ key can be used. By default ``ssh-dss`` keys are not written to console.
**Config keys**::
+ ssh:
+ emit_keys_to_console: false
+
ssh_fp_console_blacklist: <list of key types>
ssh_key_console_blacklist: <list of key types>
"""
@@ -51,6 +57,11 @@ def _get_helper_tool_path(distro):
def handle(name, cfg, cloud, log, _args):
+ if util.is_false(cfg.get("ssh", {}).get("emit_keys_to_console", True)):
+ log.debug(("Skipping module named %s, "
+ "logging of SSH host keys disabled"), name)
+ return
+
helper_path = _get_helper_tool_path(cloud.distro)
if not os.path.exists(helper_path):
log.warning(("Unable to activate module %s,"
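The early return added to ``handle()`` hinges on a nested lookup that defaults to emitting. A minimal sketch of that logic, where ``is_false`` is a simplified stand-in for ``cloudinit.util.is_false``:

```python
def is_false(val):
    # Simplified stand-in; the real util.is_false handles these string
    # variants case-insensitively as well as the bool False.
    return str(val).lower() in ("false", "0", "off", "no")

def should_emit(cfg):
    return not is_false(cfg.get("ssh", {}).get("emit_keys_to_console", True))

assert should_emit({})                                  # default: emit keys
assert should_emit({"ssh": {}})                         # parent key alone
assert should_emit({"ssh": {"emit_keys_to_console": True}})
assert not should_emit({"ssh": {"emit_keys_to_console": False}})
```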
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 7beb11ca..466dad03 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -14,12 +14,12 @@ Resolv Conf
This module is intended to manage resolv.conf in environments where early
configuration of resolv.conf is necessary for further bootstrapping and/or
where configuration management such as puppet or chef owns dns configuration.
-As Debian/Ubuntu will, by default, utilize resolvconf, and similarly RedHat
+As Debian/Ubuntu will, by default, utilize resolvconf, and similarly Red Hat
will use sysconfig, this module is likely to be of little use unless those
are configured correctly.
.. note::
- For RedHat with sysconfig, be sure to set PEERDNS=no for all DHCP
+ For Red Hat with sysconfig, be sure to set PEERDNS=no for all DHCP
enabled NICs.
.. note::
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 28d62e9d..693317c2 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -5,15 +5,15 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""
-RedHat Subscription
--------------------
+Red Hat Subscription
+--------------------
**Summary:** register red hat enterprise linux based system
-Register a RedHat system either by username and password *or* activation and
+Register a Red Hat system either by username and password *or* activation and
org. Following a successful registration, you can auto-attach subscriptions, set
the service level, add subscriptions based on pool id, enable/disable yum
repositories based on repo id, and alter the rhsm_baseurl and server-hostname
-in ``/etc/rhsm/rhs.conf``. For more details, see the ``Register RedHat
+in ``/etc/rhsm/rhs.conf``. For more details, see the ``Register Red Hat
Subscription`` example config.
**Internal name:** ``cc_rh_subscription``
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index 4fb9b44e..911789c7 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -24,15 +24,19 @@ Configuration for this module is under the ``random_seed`` config key. The
optionally be specified in encoded form, with the encoding specified in
``encoding``.
+If the cloud provides its own random seed data, it will be appended to ``data``
+before it is written to ``file``.
+
.. note::
when using a multiline value for ``data`` or specifying binary data, be
sure to follow yaml syntax and use the ``|`` and ``!binary`` yaml format
specifiers when appropriate
-Instead of specifying a data string, a command can be run to generate/collect
-the data to be written. The command should be specified as a list of args in
-the ``command`` key. If a command is specified that cannot be run, no error
-will be reported unless ``command_required`` is set to true.
+If the ``command`` key is specified, the given command will be executed. This
+will happen after ``file`` has been populated. That command's environment will
+contain the value of the ``file`` key as ``RANDOM_SEED_FILE``. If a command is
+specified that cannot be run, no error will be reported unless
+``command_required`` is set to true.
For example, to use ``pollinate`` to gather data from a
remote entropy server and write it to ``/dev/urandom``, the following could be
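(The pollinate example itself falls outside this hunk's context.) A minimal sketch of the ``command`` behaviour documented above, using a throwaway path; only the ``RANDOM_SEED_FILE`` variable name is taken from the docs:

```python
import os
import subprocess
import tempfile

# `file` is written first (the configured data plus any cloud seed)...
seed_file = os.path.join(tempfile.mkdtemp(), "seed")  # throwaway path
with open(seed_file, "wb") as f:
    f.write(b"configured-seed-data")

# ...then `command` runs with RANDOM_SEED_FILE in its environment.
env = dict(os.environ, RANDOM_SEED_FILE=seed_file)
subprocess.run(
    ["sh", "-c", 'wc -c < "$RANDOM_SEED_FILE"'],
    env=env, check=True,
)
```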
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index 1d23d80d..d4017478 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -18,8 +18,8 @@ A hostname and fqdn can be provided by specifying a full domain name under the
``fqdn`` key. Alternatively, a hostname can be specified using the ``hostname``
key, and the fqdn of the cloud will be used. If a fqdn is specified with the
``hostname`` key, it will be handled properly, although it is better to use
-the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set, ``fqdn``
-will be used.
+the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set,
+it is distro dependent whether ``hostname`` or ``fqdn`` is used.
This module will run in the init-local stage before networking is configured
if the hostname is set by metadata or user data on the local system.
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index d6b5682d..433de751 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -78,7 +78,6 @@ password.
"""
import re
-import sys
from cloudinit.distros import ug_util
from cloudinit import log as logging
@@ -214,7 +213,9 @@ def handle(_name, cfg, cloud, log, args):
if len(randlist):
blurb = ("Set the following 'random' passwords\n",
'\n'.join(randlist))
- sys.stderr.write("%s\n%s\n" % blurb)
+ util.multi_log(
+ "%s\n%s\n" % blurb, stderr=False, fallback_to_stdout=False
+ )
if expire:
expired_users = []
diff --git a/cloudinit/config/tests/test_keys_to_console.py b/cloudinit/config/tests/test_keys_to_console.py
new file mode 100644
index 00000000..4083fc54
--- /dev/null
+++ b/cloudinit/config/tests/test_keys_to_console.py
@@ -0,0 +1,34 @@
+"""Tests for cc_keys_to_console."""
+from unittest import mock
+
+import pytest
+
+from cloudinit.config import cc_keys_to_console
+
+
+class TestHandle:
+ """Tests for cloudinit.config.cc_keys_to_console.handle.
+
+ TODO: These tests only cover the emit_keys_to_console config option; they
+ should be expanded to cover the full functionality.
+ """
+
+ @mock.patch("cloudinit.config.cc_keys_to_console.util.multi_log")
+ @mock.patch("cloudinit.config.cc_keys_to_console.os.path.exists")
+ @mock.patch("cloudinit.config.cc_keys_to_console.subp.subp")
+ @pytest.mark.parametrize("cfg,subp_called", [
+ ({}, True), # Default to emitting keys
+ ({"ssh": {}}, True), # Default even if we have the parent key
+ ({"ssh": {"emit_keys_to_console": True}}, True), # Explicitly enabled
+ ({"ssh": {"emit_keys_to_console": False}}, False), # Disabled
+ ])
+ def test_emit_keys_to_console_config(
+ self, m_subp, m_path_exists, _m_multi_log, cfg, subp_called
+ ):
+ # Ensure we always find the helper
+ m_path_exists.return_value = True
+ m_subp.return_value = ("", "")
+
+ cc_keys_to_console.handle("name", cfg, mock.Mock(), mock.Mock(), ())
+
+ assert subp_called == (m_subp.call_count == 1)
diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py
index daa1ef51..bbe2ee8f 100644
--- a/cloudinit/config/tests/test_set_passwords.py
+++ b/cloudinit/config/tests/test_set_passwords.py
@@ -74,10 +74,6 @@ class TestSetPasswordsHandle(CiTestCase):
with_logs = True
- def setUp(self):
- super(TestSetPasswordsHandle, self).setUp()
- self.add_patch('cloudinit.config.cc_set_passwords.sys.stderr', 'm_err')
-
def test_handle_on_empty_config(self, *args):
"""handle logs that no password has changed when config is empty."""
cloud = self.tmp_cloud(distro='ubuntu')
@@ -129,10 +125,12 @@ class TestSetPasswordsHandle(CiTestCase):
mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])],
m_subp.call_args_list)
+ @mock.patch(MODPATH + "util.multi_log")
@mock.patch(MODPATH + "util.is_BSD")
@mock.patch(MODPATH + "subp.subp")
- def test_handle_on_chpasswd_list_creates_random_passwords(self, m_subp,
- m_is_bsd):
+ def test_handle_on_chpasswd_list_creates_random_passwords(
+ self, m_subp, m_is_bsd, m_multi_log
+ ):
"""handle parses command set random passwords."""
m_is_bsd.return_value = False
cloud = self.tmp_cloud(distro='ubuntu')
@@ -146,10 +144,32 @@ class TestSetPasswordsHandle(CiTestCase):
self.assertIn(
'DEBUG: Handling input for chpasswd as list.',
self.logs.getvalue())
- self.assertNotEqual(
- [mock.call(['chpasswd'],
- '\n'.join(valid_random_pwds) + '\n')],
- m_subp.call_args_list)
+
+ self.assertEqual(1, m_subp.call_count)
+ args, _kwargs = m_subp.call_args
+ self.assertEqual(["chpasswd"], args[0])
+
+ stdin = args[1]
+ user_pass = {
+ user: password
+ for user, password
+ in (line.split(":") for line in stdin.splitlines())
+ }
+
+ self.assertEqual(1, m_multi_log.call_count)
+ self.assertEqual(
+ mock.call(mock.ANY, stderr=False, fallback_to_stdout=False),
+ m_multi_log.call_args
+ )
+
+ self.assertEqual(set(["root", "ubuntu"]), set(user_pass.keys()))
+ written_lines = m_multi_log.call_args[0][0].splitlines()
+ for password in user_pass.values():
+ for line in written_lines:
+ if password in line:
+ break
+ else:
+ self.fail("Password not emitted to console")
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 1e118472..220bd11f 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -673,7 +673,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
found_include = False
for line in sudoers_contents.splitlines():
line = line.strip()
- include_match = re.search(r"^#includedir\s+(.*)$", line)
+ include_match = re.search(r"^[#|@]includedir\s+(.*)$", line)
if not include_match:
continue
included_dir = include_match.group(1).strip()
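For reference, the widened pattern matches both the classic ``#includedir`` directive and sudo's ``@includedir`` form; inside a character class ``|`` is a literal, so ``[#|@]`` incidentally matches ``|includedir`` as well:

```python
import re

pat = re.compile(r"^[#|@]includedir\s+(.*)$")
assert pat.search("#includedir /etc/sudoers.d").group(1) == "/etc/sudoers.d"
assert pat.search("@includedir /etc/sudoers.d") is not None
assert pat.search("includedir /etc/sudoers.d") is None  # prefix required
```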
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index 967be168..f8385f7f 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -23,7 +23,7 @@ LOG = logging.getLogger(__name__)
class Distro(distros.Distro):
- locale_conf_fn = "/etc/locale.gen"
+ locale_gen_fn = "/etc/locale.gen"
network_conf_dir = "/etc/netctl"
resolve_conf_fn = "/etc/resolv.conf"
init_cmd = ['systemctl'] # init scripts
@@ -43,16 +43,20 @@ class Distro(distros.Distro):
cfg['ssh_svcname'] = 'sshd'
def apply_locale(self, locale, out_fn=None):
- if not out_fn:
- out_fn = self.locale_conf_fn
- subp.subp(['locale-gen', '-G', locale], capture=False)
- # "" provides trailing newline during join
+ if out_fn is not None and out_fn != "/etc/locale.conf":
+ LOG.warning("Invalid locale_configfile %s, only supported "
+ "value is /etc/locale.conf", out_fn)
lines = [
util.make_header(),
- 'LANG="%s"' % (locale),
+ # Hard-coding the charset isn't ideal, but there is no other way.
+ '%s UTF-8' % (locale),
"",
]
- util.write_file(out_fn, "\n".join(lines))
+ util.write_file(self.locale_gen_fn, "\n".join(lines))
+ subp.subp(['locale-gen'], capture=False)
+ # In the future systemd can handle locale-gen stuff:
+ # https://github.com/systemd/systemd/pull/9864
+ subp.subp(['localectl', 'set-locale', locale], capture=False)
def install_packages(self, pkglist):
self.update_package_sources()
@@ -137,6 +141,17 @@ class Distro(distros.Distro):
return default
return hostname
+ # hostname (inetutils) isn't installed by default on arch, so we use
+ # hostnamectl, which is installed by default (systemd).
+ def _apply_hostname(self, hostname):
+ LOG.debug("Non-persistently setting the system hostname to %s",
+ hostname)
+ try:
+ subp.subp(['hostnamectl', '--transient', 'set-hostname', hostname])
+ except subp.ProcessExecutionError:
+ util.logexc(LOG, "Failed to non-persistently adjust the system "
+ "hostname to %s", hostname)
+
def set_timezone(self, tz):
distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
@@ -152,6 +167,8 @@ class Distro(distros.Distro):
elif args and isinstance(args, list):
cmd.extend(args)
+ if command == "upgrade":
+ command = "-u"
if command:
cmd.append(command)
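A sketch of the upgrade-verb translation above. Only the ``"upgrade"`` to ``"-u"`` mapping is from the hunk; the surrounding ``package_command`` body, including the pacman base command, is assumed for illustration:

```python
def package_command(command, args=None):
    # Base command is an assumption; the hunk only shows the tail end.
    cmd = ["pacman", "-Sy", "--quiet", "--noconfirm"]
    if args and isinstance(args, list):
        cmd.extend(args)
    if command == "upgrade":
        command = "-u"  # pacman has no literal "upgrade" operation
    if command:
        cmd.append(command)
    return cmd

assert package_command("upgrade") == \
    ["pacman", "-Sy", "--quiet", "--noconfirm", "-u"]
```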
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 9752ad28..fc5011ec 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -230,6 +230,10 @@ class ConfigMerger(object):
cc_paths = ['cloud_config']
if self._include_vendor:
+ # the order is important here: we want vendor2
+ # (dynamic vendor data from OpenStack)
+ # to override vendor (static data from OpenStack)
+ cc_paths.append('vendor2_cloud_config')
cc_paths.append('vendor_cloud_config')
for cc_p in cc_paths:
@@ -337,9 +341,12 @@ class Paths(object):
"obj_pkl": "obj.pkl",
"cloud_config": "cloud-config.txt",
"vendor_cloud_config": "vendor-cloud-config.txt",
+ "vendor2_cloud_config": "vendor2-cloud-config.txt",
"data": "data",
"vendordata_raw": "vendor-data.txt",
+ "vendordata2_raw": "vendor-data2.txt",
"vendordata": "vendor-data.txt.i",
+ "vendordata2": "vendor-data2.txt.i",
"instance_id": ".instance-id",
"manual_clean_marker": "manual-clean",
"warnings": "warnings",
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index de65e7af..385b7bcc 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -6,6 +6,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import errno
+import functools
import ipaddress
import logging
import os
@@ -19,6 +20,19 @@ from cloudinit.url_helper import UrlError, readurl
LOG = logging.getLogger(__name__)
SYS_CLASS_NET = "/sys/class/net/"
DEFAULT_PRIMARY_INTERFACE = 'eth0'
+OVS_INTERNAL_INTERFACE_LOOKUP_CMD = [
+ "ovs-vsctl",
+ "--format",
+ "csv",
+ "--no-headings",
+ "--timeout",
+ "10",
+ "--columns",
+ "name",
+ "find",
+ "interface",
+ "type=internal",
+]
def natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
@@ -133,6 +147,52 @@ def master_is_openvswitch(devname):
return os.path.exists(ovs_path)
+@functools.lru_cache(maxsize=None)
+def openvswitch_is_installed() -> bool:
+ """Return a bool indicating if Open vSwitch is installed in the system."""
+ ret = bool(subp.which("ovs-vsctl"))
+ if not ret:
+ LOG.debug(
+ "ovs-vsctl not in PATH; not detecting Open vSwitch interfaces"
+ )
+ return ret
+
+
+@functools.lru_cache(maxsize=None)
+def get_ovs_internal_interfaces() -> list:
+ """Return a list of the names of OVS internal interfaces on the system.
+
+ These will all be strings, and are used to exclude OVS-specific interfaces
+ from cloud-init's network configuration handling.
+ """
+ try:
+ out, _err = subp.subp(OVS_INTERNAL_INTERFACE_LOOKUP_CMD)
+ except subp.ProcessExecutionError as exc:
+ if "database connection failed" in exc.stderr:
+ LOG.info(
+ "Open vSwitch is not yet up; no interfaces will be detected as"
+ " OVS-internal"
+ )
+ return []
+ raise
+ else:
+ return out.splitlines()
+
+
+def is_openvswitch_internal_interface(devname: str) -> bool:
+ """Returns True if this is an OVS internal interface.
+
+ If OVS is not installed or not yet running, this will return False.
+ """
+ if not openvswitch_is_installed():
+ return False
+ ovs_bridges = get_ovs_internal_interfaces()
+ if devname in ovs_bridges:
+ LOG.debug("Detected %s as an OVS interface", devname)
+ return True
+ return False
+
+
def is_netfailover(devname, driver=None):
""" netfailover driver uses 3 nics, master, primary and standby.
this returns True if the device is either the primary or standby
@@ -884,6 +944,8 @@ def get_interfaces(blacklist_drivers=None) -> list:
# skip nics that have no mac (00:00....)
if name != 'lo' and mac == zero_mac[:len(mac)]:
continue
+ if is_openvswitch_internal_interface(name):
+ continue
# skip nics that have drivers blacklisted
driver = device_driver(name)
if driver in blacklist_drivers:
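Both new OVS probes lean on ``functools.lru_cache``, so the ``ovs-vsctl`` lookups run at most once per process; the new tests then clear that cache between cases. A self-contained sketch of the pattern:

```python
import functools

calls = []

@functools.lru_cache(maxsize=None)
def probe() -> bool:
    calls.append(1)  # stands in for the subp.which()/subp.subp() probe
    return False

probe()
probe()
assert len(calls) == 1  # second call is served from the cache

probe.cache_clear()     # what the clear_lru_cache fixtures do per-test
probe()
assert len(calls) == 2
```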
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index 0074691b..a89e5ad2 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -387,6 +387,8 @@ class Renderer(renderer.Renderer):
if k == 'network':
if ':' in route[k]:
route_line += ' -A inet6'
+ elif route.get('prefix') == 32:
+ route_line += ' -host'
else:
route_line += ' -net'
if 'prefix' in route:
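The eni change above selects a route type per target. Distilled into a standalone helper (the logic mirrors the hunk; the function name is illustrative):

```python
def route_flag(network, prefix=None):
    if ":" in network:   # IPv6 target
        return "-A inet6"
    if prefix == 32:     # single-host IPv4 target (the fix)
        return "-host"
    return "-net"

assert route_flag("2001:db8::", 64) == "-A inet6"
assert route_flag("10.0.0.1", 32) == "-host"
assert route_flag("10.0.0.0", 24) == "-net"
```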
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index a930e612..99a4bae4 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -396,6 +396,13 @@ class Renderer(renderer.Renderer):
# Only IPv6 is DHCP, IPv4 may be static
iface_cfg['BOOTPROTO'] = 'dhcp6'
iface_cfg['DHCLIENT6_MODE'] = 'managed'
+ # only if rhel AND dhcpv6 stateful
+ elif (flavor == 'rhel' and
+ subnet_type == 'ipv6_dhcpv6-stateful'):
+ iface_cfg['BOOTPROTO'] = 'dhcp'
+ iface_cfg['DHCPV6C'] = True
+ iface_cfg['IPV6INIT'] = True
+ iface_cfg['IPV6_AUTOCONF'] = False
else:
iface_cfg['IPV6INIT'] = True
# Configure network settings using DHCPv6
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index 0535387a..946f8ee2 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -391,6 +391,10 @@ class TestGetDeviceList(CiTestCase):
self.assertCountEqual(['eth0', 'eth1'], net.get_devicelist())
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
class TestGetInterfaceMAC(CiTestCase):
def setUp(self):
@@ -1224,6 +1228,121 @@ class TestNetFailOver(CiTestCase):
self.assertFalse(net.is_netfailover(devname, driver))
+class TestOpenvswitchIsInstalled:
+ """Test cloudinit.net.openvswitch_is_installed.
+
+ Uses the ``clear_lru_cache`` local autouse fixture to allow us to test
+ despite the ``lru_cache`` decorator on the unit under test.
+ """
+
+ @pytest.fixture(autouse=True)
+ def clear_lru_cache(self):
+ net.openvswitch_is_installed.cache_clear()
+
+ @pytest.mark.parametrize(
+ "expected,which_return", [(True, "/some/path"), (False, None)]
+ )
+ @mock.patch("cloudinit.net.subp.which")
+ def test_mirrors_which_result(self, m_which, expected, which_return):
+ m_which.return_value = which_return
+ assert expected == net.openvswitch_is_installed()
+
+ @mock.patch("cloudinit.net.subp.which")
+ def test_only_calls_which_once(self, m_which):
+ net.openvswitch_is_installed()
+ net.openvswitch_is_installed()
+ assert 1 == m_which.call_count
+
+
+@mock.patch("cloudinit.net.subp.subp", return_value=("", ""))
+class TestGetOVSInternalInterfaces:
+ """Test cloudinit.net.get_ovs_internal_interfaces.
+
+ Uses the ``clear_lru_cache`` local autouse fixture to allow us to test
+ despite the ``lru_cache`` decorator on the unit under test.
+ """
+ @pytest.fixture(autouse=True)
+ def clear_lru_cache(self):
+ net.get_ovs_internal_interfaces.cache_clear()
+
+ def test_command_used(self, m_subp):
+ """Test we use the correct command when we call subp"""
+ net.get_ovs_internal_interfaces()
+
+ assert [
+ mock.call(net.OVS_INTERNAL_INTERFACE_LOOKUP_CMD)
+ ] == m_subp.call_args_list
+
+ def test_subp_contents_split_and_returned(self, m_subp):
+ """Test that the command output is appropriately mangled."""
+ stdout = "iface1\niface2\niface3\n"
+ m_subp.return_value = (stdout, "")
+
+ assert [
+ "iface1",
+ "iface2",
+ "iface3",
+ ] == net.get_ovs_internal_interfaces()
+
+ def test_database_connection_error_handled_gracefully(self, m_subp):
+ """Test that the error indicating OVS is down is handled gracefully."""
+ m_subp.side_effect = ProcessExecutionError(
+ stderr="database connection failed"
+ )
+
+ assert [] == net.get_ovs_internal_interfaces()
+
+ def test_other_errors_raised(self, m_subp):
+ """Test that only database connection errors are handled."""
+ m_subp.side_effect = ProcessExecutionError()
+
+ with pytest.raises(ProcessExecutionError):
+ net.get_ovs_internal_interfaces()
+
+ def test_only_runs_once(self, m_subp):
+ """Test that we cache the value."""
+ net.get_ovs_internal_interfaces()
+ net.get_ovs_internal_interfaces()
+
+ assert 1 == m_subp.call_count
+
+
+@mock.patch("cloudinit.net.get_ovs_internal_interfaces")
+@mock.patch("cloudinit.net.openvswitch_is_installed")
+class TestIsOpenVSwitchInternalInterface:
+ def test_false_if_ovs_not_installed(
+ self, m_openvswitch_is_installed, _m_get_ovs_internal_interfaces
+ ):
+ """Test that OVS' absence returns False."""
+ m_openvswitch_is_installed.return_value = False
+
+ assert not net.is_openvswitch_internal_interface("devname")
+
+ @pytest.mark.parametrize(
+ "detected_interfaces,devname,expected_return",
+ [
+ ([], "devname", False),
+ (["notdevname"], "devname", False),
+ (["devname"], "devname", True),
+ (["some", "other", "devices", "and", "ours"], "ours", True),
+ ],
+ )
+ def test_return_value_based_on_detected_interfaces(
+ self,
+ m_openvswitch_is_installed,
+ m_get_ovs_internal_interfaces,
+ detected_interfaces,
+ devname,
+ expected_return,
+ ):
+ """Test that the detected interfaces are used correctly."""
+ m_openvswitch_is_installed.return_value = True
+ m_get_ovs_internal_interfaces.return_value = detected_interfaces
+ assert expected_return == net.is_openvswitch_internal_interface(
+ devname
+ )
+
+
class TestIsIpAddress:
"""Tests for net.is_ip_address.
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index ca4ffa8e..91e1bfe7 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -41,6 +41,7 @@ CFG_BUILTIN = {
'Oracle',
'Exoscale',
'RbxCloud',
+ 'UpCloud',
# At the end to act as a 'catch' when none of the above work...
'None',
],
@@ -56,6 +57,7 @@ CFG_BUILTIN = {
'network': {'renderers': None},
},
'vendor_data': {'enabled': True, 'prefix': []},
+ 'vendor_data2': {'enabled': True, 'prefix': []},
}
# Valid frequencies of handlers/modules
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 04ff2131..6cae9e82 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -78,17 +78,15 @@ AGENT_SEED_DIR = '/var/lib/waagent'
# In the event where the IMDS primary server is not
# available, it takes 1s to fallback to the secondary one
IMDS_TIMEOUT_IN_SECONDS = 2
-IMDS_URL = "http://169.254.169.254/metadata/"
-IMDS_VER = "2019-06-01"
-IMDS_VER_PARAM = "api-version={}".format(IMDS_VER)
+IMDS_URL = "http://169.254.169.254/metadata"
+IMDS_VER_MIN = "2019-06-01"
+IMDS_VER_WANT = "2020-09-01"
class metadata_type(Enum):
- compute = "{}instance?{}".format(IMDS_URL, IMDS_VER_PARAM)
- network = "{}instance/network?{}".format(IMDS_URL,
- IMDS_VER_PARAM)
- reprovisiondata = "{}reprovisiondata?{}".format(IMDS_URL,
- IMDS_VER_PARAM)
+ compute = "{}/instance".format(IMDS_URL)
+ network = "{}/instance/network".format(IMDS_URL)
+ reprovisiondata = "{}/reprovisiondata".format(IMDS_URL)
PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0"
@@ -270,7 +268,7 @@ BUILTIN_DS_CONFIG = {
}
# RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False
-BUILTIN_CLOUD_CONFIG = {
+BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG = {
'disk_setup': {
'ephemeral0': {'table_type': 'gpt',
'layout': [100],
@@ -349,6 +347,8 @@ class DataSourceAzure(sources.DataSource):
self.update_events['network'].add(EventType.BOOT)
self._ephemeral_dhcp_ctx = None
+ self.failed_desired_api_version = False
+
def __str__(self):
root = sources.DataSource.__str__(self)
return "%s [seed=%s]" % (root, self.seed)
@@ -520,8 +520,10 @@ class DataSourceAzure(sources.DataSource):
self._wait_for_all_nics_ready()
ret = self._reprovision()
- imds_md = get_metadata_from_imds(
- self.fallback_interface, retries=10)
+ imds_md = self.get_imds_data_with_api_fallback(
+ self.fallback_interface,
+ retries=10
+ )
(md, userdata_raw, cfg, files) = ret
self.seed = cdev
crawled_data.update({
@@ -618,8 +620,26 @@ class DataSourceAzure(sources.DataSource):
maybe_remove_ubuntu_network_config_scripts()
# Process crawled data and augment with various config defaults
- self.cfg = util.mergemanydict(
- [crawled_data['cfg'], BUILTIN_CLOUD_CONFIG])
+
+ # Only merge in default cloud config related to the ephemeral disk
+ # if the ephemeral disk exists
+ devpath = RESOURCE_DISK_PATH
+ if os.path.exists(devpath):
+ report_diagnostic_event(
+ "Ephemeral resource disk '%s' exists. "
+ "Merging default Azure cloud ephemeral disk configs."
+ % devpath,
+ logger_func=LOG.debug)
+ self.cfg = util.mergemanydict(
+ [crawled_data['cfg'], BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG])
+ else:
+ report_diagnostic_event(
+ "Ephemeral resource disk '%s' does not exist. "
+ "Not merging default Azure cloud ephemeral disk configs."
+ % devpath,
+ logger_func=LOG.debug)
+ self.cfg = crawled_data['cfg']
+
self._metadata_imds = crawled_data['metadata']['imds']
self.metadata = util.mergemanydict(
[crawled_data['metadata'], DEFAULT_METADATA])
@@ -634,6 +654,57 @@ class DataSourceAzure(sources.DataSource):
self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700)
return True
+ @azure_ds_telemetry_reporter
+ def get_imds_data_with_api_fallback(
+ self,
+ fallback_nic,
+ retries,
+ md_type=metadata_type.compute):
+ """
+ Wrapper for get_metadata_from_imds so that we can have flexibility
+ in which IMDS api-version we use. If a particular instance of IMDS
+ does not support the desired api version, we want to be fault
+ tolerant and fall back to a known-good minimum api version.
+ """
+
+ if not self.failed_desired_api_version:
+ for _ in range(retries):
+ try:
+ LOG.info(
+ "Attempting IMDS api-version: %s",
+ IMDS_VER_WANT
+ )
+ return get_metadata_from_imds(
+ fallback_nic=fallback_nic,
+ retries=0,
+ md_type=md_type,
+ api_version=IMDS_VER_WANT
+ )
+ except UrlError as err:
+ LOG.info(
+ "UrlError with IMDS api-version: %s",
+ IMDS_VER_WANT
+ )
+ if err.code == 400:
+ log_msg = "Fall back to IMDS api-version: {}".format(
+ IMDS_VER_MIN
+ )
+ report_diagnostic_event(
+ log_msg,
+ logger_func=LOG.info
+ )
+ self.failed_desired_api_version = True
+ break
+
+ LOG.info("Using IMDS api-version: %s", IMDS_VER_MIN)
+ return get_metadata_from_imds(
+ fallback_nic=fallback_nic,
+ retries=retries,
+ md_type=md_type,
+ api_version=IMDS_VER_MIN
+ )
+
def device_name_to_device(self, name):
return self.ds_cfg['disk_aliases'].get(name)
@@ -651,6 +722,10 @@ class DataSourceAzure(sources.DataSource):
LOG.debug('Retrieving public SSH keys')
ssh_keys = []
try:
+ raise KeyError(
+ "Not using public SSH keys from IMDS"
+ )
+ # pylint:disable=unreachable
ssh_keys = [
public_key['keyData']
for public_key
@@ -679,10 +754,18 @@ class DataSourceAzure(sources.DataSource):
def _iid(self, previous=None):
prev_iid_path = os.path.join(
self.paths.get_cpath('data'), 'instance-id')
- iid = dmi.read_dmi_data('system-uuid')
+ # Kernels older than 4.15 report an UPPERCASE product_uuid.
+ # We don't want Azure to treat an UPPER/lower difference as a new
+ # instance id, since that rewrites SSH host keys.
+ # LP: #1835584
+ iid = dmi.read_dmi_data('system-uuid').lower()
if os.path.exists(prev_iid_path):
previous = util.load_file(prev_iid_path).strip()
- if is_byte_swapped(previous, iid):
+ if previous.lower() == iid:
+ # If the values differ only in case, return the previous value
+ # to avoid reporting a new instance id.
+ return previous
+ if is_byte_swapped(previous.lower(), iid):
return previous
return iid
@@ -850,10 +933,11 @@ class DataSourceAzure(sources.DataSource):
# primary nic is being attached first helps here. Otherwise each nic
# could add several seconds of delay.
try:
- imds_md = get_metadata_from_imds(
+ imds_md = self.get_imds_data_with_api_fallback(
ifname,
5,
- metadata_type.network)
+ metadata_type.network
+ )
except Exception as e:
LOG.warning(
"Failed to get network metadata using nic %s. Attempt to "
@@ -983,10 +1067,14 @@ class DataSourceAzure(sources.DataSource):
if nl_sock:
nl_sock.close()
+ @azure_ds_telemetry_reporter
def _poll_imds(self):
"""Poll IMDS for the new provisioning data until we get a valid
response. Then return the returned JSON object."""
- url = metadata_type.reprovisiondata.value
+ url = "{}?api-version={}".format(
+ metadata_type.reprovisiondata.value,
+ IMDS_VER_MIN
+ )
headers = {"Metadata": "true"}
nl_sock = None
report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
@@ -1271,6 +1359,10 @@ class DataSourceAzure(sources.DataSource):
pubkey_info = None
try:
+ raise KeyError(
+ "Not using public SSH keys from IMDS"
+ )
+ # pylint:disable=unreachable
public_keys = self.metadata['imds']['compute']['publicKeys']
LOG.debug(
'Successfully retrieved %s key(s) from IMDS',
@@ -1451,26 +1543,17 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
@azure_ds_telemetry_reporter
-def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
+def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH,
is_new_instance=False, preserve_ntfs=False):
- # wait for ephemeral disk to come up
- naplen = .2
- with events.ReportEventStack(
- name="wait-for-ephemeral-disk",
- description="wait for ephemeral disk",
- parent=azure_ds_reporter
- ):
- missing = util.wait_for_files([devpath],
- maxwait=maxwait,
- naplen=naplen,
- log_pre="Azure ephemeral disk: ")
-
- if missing:
- report_diagnostic_event(
- "ephemeral device '%s' did not appear after %d seconds." %
- (devpath, maxwait),
- logger_func=LOG.warning)
- return
+ if not os.path.exists(devpath):
+ report_diagnostic_event(
+ "Ephemeral resource disk '%s' does not exist." % devpath,
+ logger_func=LOG.debug)
+ return
+ else:
+ report_diagnostic_event(
+ "Ephemeral resource disk '%s' exists." % devpath,
+ logger_func=LOG.debug)
result = False
msg = None
@@ -1969,6 +2052,7 @@ def _generate_network_config_from_imds_metadata(imds_metadata) -> dict:
netconfig = {'version': 2, 'ethernets': {}}
network_metadata = imds_metadata['network']
for idx, intf in enumerate(network_metadata['interface']):
+ has_ip_address = False
# First IPv4 and/or IPv6 address will be obtained via DHCP.
# Any additional IPs of each type will be set as static
# addresses.
@@ -1978,6 +2062,11 @@ def _generate_network_config_from_imds_metadata(imds_metadata) -> dict:
'dhcp6': False}
for addr_type in ('ipv4', 'ipv6'):
addresses = intf.get(addr_type, {}).get('ipAddress', [])
+ # If there are no available IP addresses, then we don't
+ # want to add this interface to the generated config.
+ if not addresses:
+ continue
+ has_ip_address = True
if addr_type == 'ipv4':
default_prefix = '24'
else:
@@ -1998,7 +2087,7 @@ def _generate_network_config_from_imds_metadata(imds_metadata) -> dict:
dev_config['addresses'].append(
'{ip}/{prefix}'.format(
ip=privateIp, prefix=netPrefix))
- if dev_config:
+ if dev_config and has_ip_address:
mac = ':'.join(re.findall(r'..', intf['macAddress']))
dev_config.update({
'match': {'macaddress': mac.lower()},
@@ -2027,7 +2116,8 @@ def _generate_network_config_from_fallback_config() -> dict:
@azure_ds_telemetry_reporter
def get_metadata_from_imds(fallback_nic,
retries,
- md_type=metadata_type.compute):
+ md_type=metadata_type.compute,
+ api_version=IMDS_VER_MIN):
"""Query Azure's instance metadata service, returning a dictionary.
If network is not up, setup ephemeral dhcp on fallback_nic to talk to the
@@ -2037,13 +2127,16 @@ def get_metadata_from_imds(fallback_nic,
@param fallback_nic: String. The name of the nic which requires active
network in order to query IMDS.
@param retries: The number of retries of the IMDS_URL.
+ @param md_type: Metadata type for IMDS request.
+ @param api_version: IMDS api-version to use in the request.
@return: A dict of instance metadata containing compute and network
info.
"""
kwargs = {'logfunc': LOG.debug,
'msg': 'Crawl of Azure Instance Metadata Service (IMDS)',
- 'func': _get_metadata_from_imds, 'args': (retries, md_type,)}
+ 'func': _get_metadata_from_imds,
+ 'args': (retries, md_type, api_version,)}
if net.is_up(fallback_nic):
return util.log_time(**kwargs)
else:
@@ -2059,20 +2152,26 @@ def get_metadata_from_imds(fallback_nic,
@azure_ds_telemetry_reporter
-def _get_metadata_from_imds(retries, md_type=metadata_type.compute):
-
- url = md_type.value
+def _get_metadata_from_imds(
+ retries,
+ md_type=metadata_type.compute,
+ api_version=IMDS_VER_MIN):
+ url = "{}?api-version={}".format(md_type.value, api_version)
headers = {"Metadata": "true"}
try:
response = readurl(
url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers,
retries=retries, exception_cb=retry_on_url_exc)
except Exception as e:
- report_diagnostic_event(
- 'Ignoring IMDS instance metadata. '
- 'Get metadata from IMDS failed: %s' % e,
- logger_func=LOG.warning)
- return {}
+ # pylint:disable=no-member
+ if isinstance(e, UrlError) and e.code == 400:
+ raise
+ else:
+ report_diagnostic_event(
+ 'Ignoring IMDS instance metadata. '
+ 'Get metadata from IMDS failed: %s' % e,
+ logger_func=LOG.warning)
+ return {}
try:
from json.decoder import JSONDecodeError
json_decode_error = JSONDecodeError
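Distilled, ``get_imds_data_with_api_fallback`` is a latch: try the desired
api-version once per retry, and on the first HTTP 400 (IMDS rejecting the
version) permanently fall back to the minimum for the rest of the boot. A
standalone sketch of that control flow, with ``fetch`` standing in for
``get_metadata_from_imds``:

    IMDS_VER_MIN = "2019-06-01"
    IMDS_VER_WANT = "2020-09-01"

    class ImdsVersionFallback:
        """Illustrative sketch of the latch-on-400 fallback."""

        def __init__(self, fetch):
            self.fetch = fetch  # callable(api_version) -> dict; raises on error
            self.failed_desired_api_version = False

        def get(self, retries):
            if not self.failed_desired_api_version:
                for _ in range(retries):
                    try:
                        return self.fetch(IMDS_VER_WANT)
                    except Exception as err:
                        if getattr(err, "code", None) == 400:
                            # IMDS lacks the wanted version; never ask again
                            self.failed_desired_api_version = True
                            break
            # Known-good minimum (the real code passes the retry budget here)
            return self.fetch(IMDS_VER_MIN)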
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 1930a509..a2105dc7 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -765,13 +765,14 @@ def convert_ec2_metadata_network_config(
netcfg['ethernets'][nic_name] = dev_config
return netcfg
# Apply network config for all nics and any secondary IPv4/v6 addresses
+ nic_idx = 0
for mac, nic_name in sorted(macs_to_nics.items()):
nic_metadata = macs_metadata.get(mac)
if not nic_metadata:
continue # Not a physical nic represented in metadata
# device-number is zero-indexed, we want it 1-indexed for the
# multiplication on the following line
- nic_idx = int(nic_metadata['device-number']) + 1
+ nic_idx = int(nic_metadata.get('device-number', nic_idx)) + 1
dhcp_override = {'route-metric': nic_idx * 100}
dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override,
'dhcp6': False,
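A worked example of the ``device-number`` fallback above: NICs whose metadata
omits the key reuse the previous index instead of raising KeyError, so route
metrics still come out strictly increasing in sorted-MAC order (the MACs are
hypothetical):

    macs_metadata = {
        "0a:00:00:00:00:01": {"device-number": "0"},
        "0a:00:00:00:00:02": {},  # device-number missing from metadata
        "0a:00:00:00:00:03": {},
    }

    nic_idx = 0
    metrics = {}
    for mac in sorted(macs_metadata):
        nic_idx = int(macs_metadata[mac].get("device-number", nic_idx)) + 1
        metrics[mac] = nic_idx * 100

    assert list(metrics.values()) == [100, 200, 300]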
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 741c140a..bbeada0b 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -16,6 +16,7 @@ from xml.dom import minidom
from cloudinit import dmi
from cloudinit import log as logging
+from cloudinit import safeyaml
from cloudinit import sources
from cloudinit import subp
from cloudinit import util
@@ -47,6 +48,7 @@ LOG = logging.getLogger(__name__)
CONFGROUPNAME_GUESTCUSTOMIZATION = "deployPkg"
GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS = "enable-custom-scripts"
+VMWARE_IMC_DIR = "/var/run/vmware-imc"
class DataSourceOVF(sources.DataSource):
@@ -99,9 +101,7 @@ class DataSourceOVF(sources.DataSource):
if not self.vmware_customization_supported:
LOG.debug("Skipping the check for "
"VMware Customization support")
- elif not util.get_cfg_option_bool(
- self.sys_cfg, "disable_vmware_customization", True):
-
+ else:
search_paths = (
"/usr/lib/vmware-tools", "/usr/lib64/vmware-tools",
"/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools")
@@ -119,7 +119,9 @@ class DataSourceOVF(sources.DataSource):
# When the VM is powered on, the "VMware Tools" daemon
# copies the customization specification file to
# /var/run/vmware-imc directory. cloud-init code needs
- # to search for the file in that directory.
+ # to search for the file in that directory; its presence
+ # indicates that the required metadata and userdata files
+ # are now present.
max_wait = get_max_wait_from_cfg(self.ds_cfg)
vmwareImcConfigFilePath = util.log_time(
logfunc=LOG.debug,
@@ -129,26 +131,83 @@ class DataSourceOVF(sources.DataSource):
else:
LOG.debug("Did not find the customization plugin.")
+ md_path = None
if vmwareImcConfigFilePath:
+ imcdirpath = os.path.dirname(vmwareImcConfigFilePath)
+ cf = ConfigFile(vmwareImcConfigFilePath)
+ self._vmware_cust_conf = Config(cf)
LOG.debug("Found VMware Customization Config File at %s",
vmwareImcConfigFilePath)
- nicspath = wait_for_imc_cfg_file(
- filename="nics.txt", maxwait=10, naplen=5)
+ try:
+ (md_path, ud_path, nicspath) = collect_imc_file_paths(
+ self._vmware_cust_conf)
+ except FileNotFoundError as e:
+ _raise_error_status(
+ "File(s) missing in directory",
+ e,
+ GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
else:
LOG.debug("Did not find VMware Customization Config File")
- else:
- LOG.debug("Customization for VMware platform is disabled.")
- if vmwareImcConfigFilePath:
+ # Honor disable_vmware_customization setting when metadata is absent
+ if not md_path:
+ if util.get_cfg_option_bool(self.sys_cfg,
+ "disable_vmware_customization",
+ True):
+ LOG.debug(
+ "Customization for VMware platform is disabled.")
+ # reset vmwareImcConfigFilePath to None to avoid
+ # customization for VMware platform
+ vmwareImcConfigFilePath = None
+
+ use_raw_data = bool(vmwareImcConfigFilePath and md_path)
+ if use_raw_data:
+ set_gc_status(self._vmware_cust_conf, "Started")
+ LOG.debug("Start to load cloud-init meta data and user data")
+ try:
+ (md, ud, cfg, network) = load_cloudinit_data(md_path, ud_path)
+
+ if network:
+ self._network_config = network
+ else:
+ self._network_config = (
+ self.distro.generate_fallback_config()
+ )
+
+ except safeyaml.YAMLError as e:
+ _raise_error_status(
+ "Error parsing the cloud-init meta data",
+ e,
+ GuestCustErrorEnum.GUESTCUST_ERROR_WRONG_META_FORMAT,
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
+ except Exception as e:
+ _raise_error_status(
+ "Error loading cloud-init configuration",
+ e,
+ GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
+
+ self._vmware_cust_found = True
+ found.append('vmware-tools')
+
+ util.del_dir(imcdirpath)
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_DONE,
+ GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
+ set_gc_status(self._vmware_cust_conf, "Successful")
+
+ elif vmwareImcConfigFilePath:
+ # Load configuration from vmware_imc
self._vmware_nics_to_enable = ""
try:
- cf = ConfigFile(vmwareImcConfigFilePath)
- self._vmware_cust_conf = Config(cf)
set_gc_status(self._vmware_cust_conf, "Started")
(md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf)
self._vmware_nics_to_enable = get_nics_to_enable(nicspath)
- imcdirpath = os.path.dirname(vmwareImcConfigFilePath)
product_marker = self._vmware_cust_conf.marker_id
hasmarkerfile = check_marker_exists(
product_marker, os.path.join(self.paths.cloud_dir, 'data'))
@@ -357,7 +416,7 @@ class DataSourceOVFNet(DataSourceOVF):
def get_max_wait_from_cfg(cfg):
- default_max_wait = 90
+ default_max_wait = 15
max_wait_cfg_option = 'vmware_cust_file_max_wait'
max_wait = default_max_wait
@@ -684,4 +743,83 @@ def _raise_error_status(prefix, error, event, config_file, conf):
util.del_dir(os.path.dirname(config_file))
raise error
+
+def load_cloudinit_data(md_path, ud_path):
+ """
+ Load the cloud-init meta data, user data, cfg and network from the
+ given files
+
+ @return: 4-tuple of configuration
+ metadata, userdata, cfg={}, network
+
+ @raises: FileNotFoundError if md_path or ud_path are absent
+ """
+ LOG.debug('load meta data from: %s: user data from: %s',
+ md_path, ud_path)
+ md = {}
+ ud = None
+ network = None
+
+ md = safeload_yaml_or_dict(util.load_file(md_path))
+
+ if 'network' in md:
+ network = md['network']
+
+ if ud_path:
+ ud = util.load_file(ud_path).replace("\r", "")
+ return md, ud, {}, network
+
+
+def safeload_yaml_or_dict(data):
+ '''
+ The meta data could be JSON or YAML. Since YAML is a strict superset of
+ JSON, we will unmarshal the data as YAML. If data is None then a new
+ dictionary is returned.
+ '''
+ if not data:
+ return {}
+ return safeyaml.load(data)
+
+
+def collect_imc_file_paths(cust_conf):
+ '''
+ Collect all the other IMC files.
+
+ metadata is preferred to nics.txt configuration data.
+
+ If metadata file exists because it is specified in customization
+ configuration, then metadata is required and userdata is optional.
+
+ @return a 3-tuple containing desired configuration file paths if present
+ Expected returns:
+ 1. user provided metadata and userdata (md_path, ud_path, None)
+ 2. user provided metadata (md_path, None, None)
+ 3. user-provided network config (None, None, nics_path)
+ 4. No config found (None, None, None)
+ '''
+ md_path = None
+ ud_path = None
+ nics_path = None
+ md_file = cust_conf.meta_data_name
+ if md_file:
+ md_path = os.path.join(VMWARE_IMC_DIR, md_file)
+ if not os.path.exists(md_path):
+ raise FileNotFoundError("meta data file is not found: %s"
+ % md_path)
+
+ ud_file = cust_conf.user_data_name
+ if ud_file:
+ ud_path = os.path.join(VMWARE_IMC_DIR, ud_file)
+ if not os.path.exists(ud_path):
+ raise FileNotFoundError("user data file is not found: %s"
+ % ud_path)
+ else:
+ nics_path = os.path.join(VMWARE_IMC_DIR, "nics.txt")
+ if not os.path.exists(nics_path):
+ LOG.debug('%s does not exist.', nics_path)
+ nics_path = None
+
+ return md_path, ud_path, nics_path
+
+
# vi: ts=4 expandtab
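To make the new raw-data path concrete: a hypothetical meta data file named
by ``CLOUDINIT|METADATA`` in the customization spec could look like the YAML
below. ``load_cloudinit_data`` extracts the optional ``network`` key for
``self._network_config``, falling back to the distro-generated config when it
is absent (the keys shown are illustrative, not a schema):

    import textwrap
    import yaml  # stand-in for cloudinit.safeyaml

    metadata_yaml = textwrap.dedent("""\
        instance-id: vm-00001
        local-hostname: myvm
        network:
          version: 2
          ethernets:
            ens192:
              dhcp4: true
    """)

    md = yaml.safe_load(metadata_yaml)
    network = md.get("network")  # None -> distro fallback config instead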
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index b3406c67..619a171e 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -167,6 +167,14 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
LOG.warning("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
+ vd2 = results.get('vendordata2')
+ self.vendordata2_pure = vd2
+ try:
+ self.vendordata2_raw = sources.convert_vendordata(vd2)
+ except ValueError as e:
+ LOG.warning("Invalid content in vendor-data2: %s", e)
+ self.vendordata2_raw = None
+
return True
def _crawl_metadata(self):
diff --git a/cloudinit/sources/DataSourceUpCloud.py b/cloudinit/sources/DataSourceUpCloud.py
new file mode 100644
index 00000000..209b9672
--- /dev/null
+++ b/cloudinit/sources/DataSourceUpCloud.py
@@ -0,0 +1,165 @@
+# Author: Antti Myyrä <antti.myyra@upcloud.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# UpCloud server metadata API:
+# https://developers.upcloud.com/1.3/8-servers/#metadata-service
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+from cloudinit import net as cloudnet
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+
+
+from cloudinit.sources.helpers import upcloud as uc_helper
+
+LOG = logging.getLogger(__name__)
+
+BUILTIN_DS_CONFIG = {"metadata_url": "http://169.254.169.254/metadata/v1.json"}
+
+# Wait for up to a minute, retrying the meta-data server
+# every 2 seconds.
+MD_RETRIES = 30
+MD_TIMEOUT = 2
+MD_WAIT_RETRY = 2
+
+
+class DataSourceUpCloud(sources.DataSource):
+
+ dsname = "UpCloud"
+
+ # We'll perform DHCP setup only in init-local, see DataSourceUpCloudLocal
+ perform_dhcp_setup = False
+
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.distro = distro
+ self.metadata = dict()
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "UpCloud"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+ self.metadata_address = self.ds_cfg["metadata_url"]
+ self.retries = self.ds_cfg.get("retries", MD_RETRIES)
+ self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT)
+ self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY)
+ self._network_config = None
+
+ def _get_sysinfo(self):
+ return uc_helper.read_sysinfo()
+
+ def _read_metadata(self):
+ return uc_helper.read_metadata(
+ self.metadata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries,
+ )
+
+ def _get_data(self):
+ (is_upcloud, server_uuid) = self._get_sysinfo()
+
+ # only proceed if we know we are on UpCloud
+ if not is_upcloud:
+ return False
+
+ LOG.info("Running on UpCloud. server_uuid=%s", server_uuid)
+
+ if self.perform_dhcp_setup: # Setup networking in init-local stage.
+ try:
+ LOG.debug("Finding a fallback NIC")
+ nic = cloudnet.find_fallback_nic()
+ LOG.debug("Discovering metadata via DHCP interface %s", nic)
+ with EphemeralDHCPv4(nic):
+ md = util.log_time(
+ logfunc=LOG.debug,
+ msg="Reading from metadata service",
+ func=self._read_metadata,
+ )
+ except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
+ util.logexc(LOG, str(e))
+ return False
+ else:
+ try:
+ LOG.debug(
+ "Discovering metadata without DHCP-configured networking"
+ )
+ md = util.log_time(
+ logfunc=LOG.debug,
+ msg="Reading from metadata service",
+ func=self._read_metadata,
+ )
+ except sources.InvalidMetaDataException as e:
+ util.logexc(LOG, str(e))
+ LOG.info(
+ "No DHCP-enabled interfaces available, "
+ "unable to fetch metadata for %s",
+ server_uuid,
+ )
+ return False
+
+ self.metadata_full = md
+ self.metadata["instance-id"] = md.get("instance_id", server_uuid)
+ self.metadata["local-hostname"] = md.get("hostname")
+ self.metadata["network"] = md.get("network")
+ self.metadata["public-keys"] = md.get("public_keys")
+ self.metadata["availability_zone"] = md.get("region", "default")
+ self.vendordata_raw = md.get("vendor_data", None)
+ self.userdata_raw = md.get("user_data", None)
+
+ return True
+
+ def check_instance_id(self, sys_cfg):
+ return sources.instance_id_matches_system_uuid(self.get_instance_id())
+
+ @property
+ def network_config(self):
+ """
+ Configure the networking. This needs to be done each boot,
+ since the IP and interface information might have changed
+ due to reconfiguration.
+ """
+
+ if self._network_config:
+ return self._network_config
+
+ raw_network_config = self.metadata.get("network")
+ if not raw_network_config:
+ raise Exception("Unable to get network meta-data from server....")
+
+ self._network_config = uc_helper.convert_network_config(
+ raw_network_config,
+ )
+
+ return self._network_config
+
+
+class DataSourceUpCloudLocal(DataSourceUpCloud):
+ """
+ Run in init-local using a DHCP discovery prior to metadata crawl.
+
+ In init-local, no network is available. This subclass sets up minimal
+ networking with dhclient on a viable nic so that it can talk to the
+ metadata service. If the metadata service provides network configuration,
+ it is rendered for the instance based on that metadata.
+ """
+
+ perform_dhcp_setup = True # Get metadata network config if present
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceUpCloudLocal, (sources.DEP_FILESYSTEM, )),
+ (DataSourceUpCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
+
+
+# vi: ts=4 expandtab
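Because ``ds_cfg`` merges any ``datasource: UpCloud`` system configuration
ahead of the builtin defaults, the retry knobs can be tuned from /etc/cloud
without code changes. A toy sketch of that precedence, assuming (as elsewhere
in cloud-init) that ``mergemanydict`` gives earlier dicts priority:

    def merge_first_wins(dicts):
        # Toy stand-in for cloudinit.util.mergemanydict
        merged = {}
        for d in reversed(dicts):
            merged.update(d)
        return merged

    BUILTIN = {"metadata_url": "http://169.254.169.254/metadata/v1.json"}
    system_cfg = {"timeout": 5, "retries": 10}  # hypothetical cloud.cfg.d values

    ds_cfg = merge_first_wins([system_cfg, BUILTIN])
    assert ds_cfg["timeout"] == 5      # operator override wins
    assert "metadata_url" in ds_cfg    # builtin default retained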
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 9dccc687..1ad1880d 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -187,7 +187,8 @@ class DataSource(metaclass=abc.ABCMeta):
cached_attr_defaults = (
('ec2_metadata', UNSET), ('network_json', UNSET),
('metadata', {}), ('userdata', None), ('userdata_raw', None),
- ('vendordata', None), ('vendordata_raw', None))
+ ('vendordata', None), ('vendordata_raw', None),
+ ('vendordata2', None), ('vendordata2_raw', None))
_dirty_cache = False
@@ -203,7 +204,9 @@ class DataSource(metaclass=abc.ABCMeta):
self.metadata = {}
self.userdata_raw = None
self.vendordata = None
+ self.vendordata2 = None
self.vendordata_raw = None
+ self.vendordata2_raw = None
self.ds_cfg = util.get_cfg_by_path(
self.sys_cfg, ("datasource", self.dsname), {})
@@ -392,6 +395,11 @@ class DataSource(metaclass=abc.ABCMeta):
self.vendordata = self.ud_proc.process(self.get_vendordata_raw())
return self.vendordata
+ def get_vendordata2(self):
+ if self.vendordata2 is None:
+ self.vendordata2 = self.ud_proc.process(self.get_vendordata2_raw())
+ return self.vendordata2
+
@property
def fallback_interface(self):
"""Determine the network interface used during local network config."""
@@ -494,6 +502,9 @@ class DataSource(metaclass=abc.ABCMeta):
def get_vendordata_raw(self):
return self.vendordata_raw
+ def get_vendordata2_raw(self):
+ return self.vendordata2_raw
+
# the data sources' config_obj is a cloud-config formatted
# object that came to it from ways other than cloud-config
# because cloud-config content would be handled elsewhere
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 3e6365f1..4f566e64 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -247,6 +247,11 @@ class BaseReader(metaclass=abc.ABCMeta):
False,
load_json_anytype,
)
+ files['vendordata2'] = (
+ self._path_join("openstack", version, 'vendor_data2.json'),
+ False,
+ load_json_anytype,
+ )
files['networkdata'] = (
self._path_join("openstack", version, 'network_data.json'),
False,
diff --git a/cloudinit/sources/helpers/tests/test_openstack.py b/cloudinit/sources/helpers/tests/test_openstack.py
index 2bde1e3f..95fb9743 100644
--- a/cloudinit/sources/helpers/tests/test_openstack.py
+++ b/cloudinit/sources/helpers/tests/test_openstack.py
@@ -1,10 +1,15 @@
# This file is part of cloud-init. See LICENSE file for license information.
# ./cloudinit/sources/helpers/tests/test_openstack.py
+from unittest import mock
from cloudinit.sources.helpers import openstack
from cloudinit.tests import helpers as test_helpers
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False)
+)
class TestConvertNetJson(test_helpers.CiTestCase):
def test_phy_types(self):
diff --git a/cloudinit/sources/helpers/upcloud.py b/cloudinit/sources/helpers/upcloud.py
new file mode 100644
index 00000000..199baa58
--- /dev/null
+++ b/cloudinit/sources/helpers/upcloud.py
@@ -0,0 +1,231 @@
+# Author: Antti Myyrä <antti.myyra@upcloud.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import dmi
+from cloudinit import log as logging
+from cloudinit import net as cloudnet
+from cloudinit import url_helper
+
+LOG = logging.getLogger(__name__)
+
+
+def convert_to_network_config_v1(config):
+ """
+ Convert the UpCloud network metadata description into
+ Cloud-init's version 1 netconfig format.
+
+ Example JSON:
+ {
+ "interfaces": [
+ {
+ "index": 1,
+ "ip_addresses": [
+ {
+ "address": "94.237.105.53",
+ "dhcp": true,
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ],
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "94.237.104.1",
+ "network": "94.237.104.0/22"
+ },
+ {
+ "address": "94.237.105.50",
+ "dhcp": false,
+ "dns": [],
+ "family": "IPv4",
+ "floating": true,
+ "gateway": "",
+ "network": "94.237.105.50/32"
+ }
+ ],
+ "mac": "32:d5:ba:4a:36:e7",
+ "network_id": "031457f4-0f8c-483c-96f2-eccede02909c",
+ "type": "public"
+ },
+ {
+ "index": 2,
+ "ip_addresses": [
+ {
+ "address": "10.6.3.27",
+ "dhcp": true,
+ "dns": [],
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "10.6.0.1",
+ "network": "10.6.0.0/22"
+ }
+ ],
+ "mac": "32:d5:ba:4a:84:cc",
+ "network_id": "03d82553-5bea-4132-b29a-e1cf67ec2dd1",
+ "type": "utility"
+ },
+ {
+ "index": 3,
+ "ip_addresses": [
+ {
+ "address": "2a04:3545:1000:720:38d6:baff:fe4a:63e7",
+ "dhcp": true,
+ "dns": [
+ "2a04:3540:53::1",
+ "2a04:3544:53::1"
+ ],
+ "family": "IPv6",
+ "floating": false,
+ "gateway": "2a04:3545:1000:720::1",
+ "network": "2a04:3545:1000:720::/64"
+ }
+ ],
+ "mac": "32:d5:ba:4a:63:e7",
+ "network_id": "03000000-0000-4000-8046-000000000000",
+ "type": "public"
+ },
+ {
+ "index": 4,
+ "ip_addresses": [
+ {
+ "address": "172.30.1.10",
+ "dhcp": true,
+ "dns": [],
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "172.30.1.1",
+ "network": "172.30.1.0/24"
+ }
+ ],
+ "mac": "32:d5:ba:4a:8a:e1",
+ "network_id": "035a0a4a-77b4-4de5-820d-189fc8135714",
+ "type": "private"
+ }
+ ],
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ]
+ }
+ """
+
+ def _get_subnet_config(ip_addr, dns):
+ if ip_addr.get("dhcp"):
+ dhcp_type = "dhcp"
+ if ip_addr.get("family") == "IPv6":
+ # UpCloud currently passes IPv6 addresses via
+ # StateLess Address Auto Configuration (SLAAC)
+ dhcp_type = "ipv6_dhcpv6-stateless"
+ return {"type": dhcp_type}
+
+ static_type = "static"
+ if ip_addr.get("family") == "IPv6":
+ static_type = "static6"
+ subpart = {
+ "type": static_type,
+ "control": "auto",
+ "address": ip_addr.get("address"),
+ }
+
+ if ip_addr.get("gateway"):
+ subpart["gateway"] = ip_addr.get("gateway")
+
+ if "/" in ip_addr.get("network"):
+ subpart["netmask"] = ip_addr.get("network").split("/")[1]
+
+ if dns != ip_addr.get("dns") and ip_addr.get("dns"):
+ subpart["dns_nameservers"] = ip_addr.get("dns")
+
+ return subpart
+
+ nic_configs = []
+ macs_to_interfaces = cloudnet.get_interfaces_by_mac()
+ LOG.debug("NIC mapping: %s", macs_to_interfaces)
+
+ for raw_iface in config.get("interfaces"):
+ LOG.debug("Considering %s", raw_iface)
+
+ mac_address = raw_iface.get("mac")
+ if mac_address not in macs_to_interfaces:
+ raise RuntimeError(
+ "Did not find network interface on system "
+ "with mac '%s'. Cannot apply configuration: %s"
+ % (mac_address, raw_iface)
+ )
+
+ iface_type = raw_iface.get("type")
+ sysfs_name = macs_to_interfaces.get(mac_address)
+
+ LOG.debug(
+ "Found %s interface '%s' with address '%s' (index %d)",
+ iface_type,
+ sysfs_name,
+ mac_address,
+ raw_iface.get("index"),
+ )
+
+ interface = {
+ "type": "physical",
+ "name": sysfs_name,
+ "mac_address": mac_address
+ }
+
+ subnets = []
+ for ip_address in raw_iface.get("ip_addresses"):
+ sub_part = _get_subnet_config(ip_address, config.get("dns"))
+ subnets.append(sub_part)
+
+ interface["subnets"] = subnets
+ nic_configs.append(interface)
+
+ if config.get("dns"):
+ LOG.debug("Setting DNS nameservers to %s", config.get("dns"))
+ nic_configs.append({
+ "type": "nameserver",
+ "address": config.get("dns")
+ })
+
+ return {"version": 1, "config": nic_configs}
+
+
+def convert_network_config(config):
+ return convert_to_network_config_v1(config)
+
+
+def read_metadata(url, timeout=2, sec_between=2, retries=30):
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
+ if not response.ok():
+ raise RuntimeError("unable to read metadata at %s" % url)
+ return json.loads(response.contents.decode())
+
+
+def read_sysinfo():
+ # UpCloud embeds vendor ID and server UUID in the
+ # SMBIOS information
+
+ # Detect if we are on UpCloud and return the UUID
+
+ vendor_name = dmi.read_dmi_data("system-manufacturer")
+ if vendor_name != "UpCloud":
+ return False, None
+
+ server_uuid = dmi.read_dmi_data("system-uuid")
+ if server_uuid:
+ LOG.debug(
+ "system identified via SMBIOS as UpCloud server: %s",
+ server_uuid
+ )
+ else:
+ msg = (
+ "system identified via SMBIOS as a UpCloud server, but "
+ "did not provide an ID. Please contact support via"
+ "https://hub.upcloud.com or via email with support@upcloud.com"
+ )
+ LOG.critical(msg)
+ raise RuntimeError(msg)
+
+ return True, server_uuid
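Tracing ``convert_to_network_config_v1`` with the 'utility' interface from
the docstring JSON shows the shape of the result. The interface name comes
from the MAC-to-name mapping on the running system, so ``eth1`` here is
illustrative:

    raw_iface = {
        "index": 2,
        "ip_addresses": [
            {"address": "10.6.3.27", "dhcp": True, "dns": [],
             "family": "IPv4", "floating": False,
             "gateway": "10.6.0.1", "network": "10.6.0.0/22"},
        ],
        "mac": "32:d5:ba:4a:84:cc",
        "type": "utility",
    }

    # Expected v1 entry for this NIC (dhcp: true on IPv4 -> bare dhcp subnet):
    expected = {
        "type": "physical",
        "name": "eth1",                      # from get_interfaces_by_mac()
        "mac_address": "32:d5:ba:4a:84:cc",
        "subnets": [{"type": "dhcp"}],
    }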
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index 7109aef3..bdfab5a0 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -27,6 +27,8 @@ class Config(object):
UTC = 'DATETIME|UTC'
POST_GC_STATUS = 'MISC|POST-GC-STATUS'
DEFAULT_RUN_POST_SCRIPT = 'MISC|DEFAULT-RUN-POST-CUST-SCRIPT'
+ CLOUDINIT_META_DATA = 'CLOUDINIT|METADATA'
+ CLOUDINIT_USER_DATA = 'CLOUDINIT|USERDATA'
def __init__(self, configFile):
self._configFile = configFile
@@ -130,4 +132,14 @@ class Config(object):
raise ValueError('defaultRunPostScript value should be yes/no')
return defaultRunPostScript == 'yes'
+ @property
+ def meta_data_name(self):
+ """Return the name of cloud-init meta data."""
+ return self._configFile.get(Config.CLOUDINIT_META_DATA, None)
+
+ @property
+ def user_data_name(self):
+ """Return the name of cloud-init user data."""
+ return self._configFile.get(Config.CLOUDINIT_USER_DATA, None)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
index 65ae7390..96d839b8 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
@@ -11,5 +11,6 @@ class GuestCustErrorEnum(object):
GUESTCUST_ERROR_SUCCESS = 0
GUESTCUST_ERROR_SCRIPT_DISABLED = 6
+ GUESTCUST_ERROR_WRONG_META_FORMAT = 9
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py
index a7bbdfd9..dcf33b9b 100644
--- a/cloudinit/sources/tests/test_oracle.py
+++ b/cloudinit/sources/tests/test_oracle.py
@@ -173,6 +173,10 @@ class TestIsPlatformViable(test_helpers.CiTestCase):
m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False)
+)
class TestNetworkConfigFromOpcImds:
def test_no_secondary_nics_does_not_mutate_input(self, oracle_ds):
oracle_ds._vnics_data = [{}]
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 0cce6e80..5bacc85d 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -360,8 +360,18 @@ class Init(object):
reporter=self.reporter)
def update(self):
- self._store_userdata()
- self._store_vendordata()
+ self._store_rawdata(self.datasource.get_userdata_raw(),
+ 'userdata')
+ self._store_processeddata(self.datasource.get_userdata(),
+ 'userdata')
+ self._store_raw_vendordata(self.datasource.get_vendordata_raw(),
+ 'vendordata')
+ self._store_processeddata(self.datasource.get_vendordata(),
+ 'vendordata')
+ self._store_raw_vendordata(self.datasource.get_vendordata2_raw(),
+ 'vendordata2')
+ self._store_processeddata(self.datasource.get_vendordata2(),
+ 'vendordata2')
def setup_datasource(self):
with events.ReportEventStack("setup-datasource",
@@ -381,28 +391,28 @@ class Init(object):
is_new_instance=self.is_new_instance())
self._write_to_cache()
- def _store_userdata(self):
- raw_ud = self.datasource.get_userdata_raw()
- if raw_ud is None:
- raw_ud = b''
- util.write_file(self._get_ipath('userdata_raw'), raw_ud, 0o600)
- # processed userdata is a Mime message, so write it as string.
- processed_ud = self.datasource.get_userdata()
- if processed_ud is None:
- raw_ud = ''
- util.write_file(self._get_ipath('userdata'), str(processed_ud), 0o600)
-
- def _store_vendordata(self):
- raw_vd = self.datasource.get_vendordata_raw()
- if raw_vd is None:
- raw_vd = b''
- util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0o600)
- # processed vendor data is a Mime message, so write it as string.
- processed_vd = str(self.datasource.get_vendordata())
- if processed_vd is None:
- processed_vd = ''
- util.write_file(self._get_ipath('vendordata'), str(processed_vd),
- 0o600)
+ def _store_rawdata(self, data, datasource):
+ # Raw data is bytes, not a string
+ if data is None:
+ data = b''
+ util.write_file(self._get_ipath('%s_raw' % datasource), data, 0o600)
+
+ def _store_raw_vendordata(self, data, datasource):
+ # Only these raw data types are supported
+ if data is not None and type(data) not in [bytes, str, list]:
+ raise TypeError("vendordata_raw is unsupported type '%s'" %
+ str(type(data)))
+ # This data may be a list; if so, convert it to a JSON string
+ if isinstance(data, list):
+ data = util.json_dumps(data)
+ self._store_rawdata(data, datasource)
+
+ def _store_processeddata(self, processed_data, datasource):
+ # processed is a Mime message, so write as string.
+ if processed_data is None:
+ processed_data = ''
+ util.write_file(self._get_ipath(datasource),
+ str(processed_data), 0o600)
def _default_handlers(self, opts=None):
if opts is None:
@@ -434,6 +444,11 @@ class Init(object):
opts={'script_path': 'vendor_scripts',
'cloud_config_path': 'vendor_cloud_config'})
+ def _default_vendordata2_handlers(self):
+ return self._default_handlers(
+ opts={'script_path': 'vendor_scripts',
+ 'cloud_config_path': 'vendor2_cloud_config'})
+
def _do_handlers(self, data_msg, c_handlers_list, frequency,
excluded=None):
"""
@@ -555,7 +570,12 @@ class Init(object):
with events.ReportEventStack("consume-vendor-data",
"reading and applying vendor-data",
parent=self.reporter):
- self._consume_vendordata(frequency)
+ self._consume_vendordata("vendordata", frequency)
+
+ with events.ReportEventStack("consume-vendor-data2",
+ "reading and applying vendor-data2",
+ parent=self.reporter):
+ self._consume_vendordata("vendordata2", frequency)
# Perform post-consumption adjustments so that
# modules that run during the init stage reflect
@@ -568,46 +588,62 @@ class Init(object):
# objects before the load of the userdata happened,
# this is expected.
- def _consume_vendordata(self, frequency=PER_INSTANCE):
+ def _consume_vendordata(self, vendor_source, frequency=PER_INSTANCE):
"""
Consume the vendordata and run the part handlers on it
"""
+
# User-data should have been consumed first.
# So we merge the other available cloud-configs (everything except
# vendor provided), and check whether or not we should consume
# vendor data at all. That gives user or system a chance to override.
- if not self.datasource.get_vendordata_raw():
- LOG.debug("no vendordata from datasource")
- return
+ if vendor_source == 'vendordata':
+ if not self.datasource.get_vendordata_raw():
+ LOG.debug("no vendordata from datasource")
+ return
+ cfg_name = 'vendor_data'
+ elif vendor_source == 'vendordata2':
+ if not self.datasource.get_vendordata2_raw():
+ LOG.debug("no vendordata2 from datasource")
+ return
+ cfg_name = 'vendor_data2'
+ else:
+ raise RuntimeError("vendor_source arg must be either 'vendordata'"
+ " or 'vendordata2'")
_cc_merger = helpers.ConfigMerger(paths=self._paths,
datasource=self.datasource,
additional_fns=[],
base_cfg=self.cfg,
include_vendor=False)
- vdcfg = _cc_merger.cfg.get('vendor_data', {})
+ vdcfg = _cc_merger.cfg.get(cfg_name, {})
if not isinstance(vdcfg, dict):
vdcfg = {'enabled': False}
- LOG.warning("invalid 'vendor_data' setting. resetting to: %s",
- vdcfg)
+ LOG.warning("invalid %s setting. resetting to: %s",
+ cfg_name, vdcfg)
enabled = vdcfg.get('enabled')
no_handlers = vdcfg.get('disabled_handlers', None)
if not util.is_true(enabled):
- LOG.debug("vendordata consumption is disabled.")
+ LOG.debug("%s consumption is disabled.", vendor_source)
return
- LOG.debug("vendor data will be consumed. disabled_handlers=%s",
- no_handlers)
+ LOG.debug("%s will be consumed. disabled_handlers=%s",
+ vendor_source, no_handlers)
- # Ensure vendordata source fetched before activation (just incase)
- vendor_data_msg = self.datasource.get_vendordata()
+ # Ensure vendordata source fetched before activation (just in case)
- # This keeps track of all the active handlers, while excluding what the
- # users doesn't want run, i.e. boot_hook, cloud_config, shell_script
- c_handlers_list = self._default_vendordata_handlers()
+ # c_handlers_list keeps track of all the active handlers, while
+ # excluding what the user doesn't want run, i.e. boot_hook,
+ # cloud_config, shell_script
+ if vendor_source == 'vendordata':
+ vendor_data_msg = self.datasource.get_vendordata()
+ c_handlers_list = self._default_vendordata_handlers()
+ else:
+ vendor_data_msg = self.datasource.get_vendordata2()
+ c_handlers_list = self._default_vendordata2_handlers()
# Run the handlers
self._do_handlers(vendor_data_msg, c_handlers_list, frequency,
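The raw-storage type policy above, as one runnable sketch: bytes and str pass
through, lists are serialized to JSON, anything else is rejected, and None is
written as an empty payload:

    import json

    def coerce_raw_vendordata(data):
        # Mirrors _store_raw_vendordata plus _store_rawdata's None handling
        if data is not None and type(data) not in (bytes, str, list):
            raise TypeError(
                "vendordata_raw is unsupported type '%s'" % str(type(data)))
        if isinstance(data, list):
            data = json.dumps(data)  # the real code uses util.json_dumps
        return b"" if data is None else data

    assert coerce_raw_vendordata(None) == b""
    assert coerce_raw_vendordata(["a", "b"]) == '["a", "b"]'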
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index b7a302f1..e811917e 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -851,4 +851,60 @@ class TestEnsureFile:
assert "ab" == kwargs["omode"]
+@mock.patch("cloudinit.util.grp.getgrnam")
+@mock.patch("cloudinit.util.os.setgid")
+@mock.patch("cloudinit.util.os.umask")
+class TestRedirectOutputPreexecFn:
+ """This tests specifically the preexec_fn used in redirect_output."""
+
+ @pytest.fixture(params=["outfmt", "errfmt"])
+ def preexec_fn(self, request):
+ """A fixture to gather the preexec_fn used by redirect_output.
+
+ This enables simpler direct testing of it, and parameterises any tests
+ using it to cover both the stdout and stderr code paths.
+ """
+ test_string = "| piped output to invoke subprocess"
+ if request.param == "outfmt":
+ args = (test_string, None)
+ elif request.param == "errfmt":
+ args = (None, test_string)
+ with mock.patch("cloudinit.util.subprocess.Popen") as m_popen:
+ util.redirect_output(*args)
+
+ assert 1 == m_popen.call_count
+ _args, kwargs = m_popen.call_args
+ assert "preexec_fn" in kwargs, "preexec_fn not passed to Popen"
+ return kwargs["preexec_fn"]
+
+ def test_preexec_fn_sets_umask(
+ self, m_os_umask, _m_setgid, _m_getgrnam, preexec_fn
+ ):
+ """preexec_fn should set a mask that avoids world-readable files."""
+ preexec_fn()
+
+ assert [mock.call(0o037)] == m_os_umask.call_args_list
+
+ def test_preexec_fn_sets_group_id_if_adm_group_present(
+ self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn
+ ):
+ """We should setgrp to adm if present, so files are owned by them."""
+ fake_group = mock.Mock(gr_gid=mock.sentinel.gr_gid)
+ m_getgrnam.return_value = fake_group
+
+ preexec_fn()
+
+ assert [mock.call("adm")] == m_getgrnam.call_args_list
+ assert [mock.call(mock.sentinel.gr_gid)] == m_setgid.call_args_list
+
+ def test_preexec_fn_handles_absent_adm_group_gracefully(
+ self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn
+ ):
+ """We should handle an absent adm group gracefully."""
+ m_getgrnam.side_effect = KeyError("getgrnam(): name not found: 'adm'")
+
+ preexec_fn()
+
+ assert 0 == m_setgid.call_count
+
# vi: ts=4 expandtab
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 769f3425..4e0a72db 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -359,7 +359,7 @@ def find_modules(root_dir):
def multi_log(text, console=True, stderr=True,
- log=None, log_level=logging.DEBUG):
+ log=None, log_level=logging.DEBUG, fallback_to_stdout=True):
if stderr:
sys.stderr.write(text)
if console:
@@ -368,7 +368,7 @@ def multi_log(text, console=True, stderr=True,
with open(conpath, 'w') as wfh:
wfh.write(text)
wfh.flush()
- else:
+ elif fallback_to_stdout:
# A container may lack /dev/console (arguably a container bug). If
# it does not exist, then write output to stdout. this will result
# in duplicate stderr and stdout messages if stderr was True.
@@ -623,6 +623,26 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
if not o_err:
o_err = sys.stderr
+ # pylint: disable=subprocess-popen-preexec-fn
+ def set_subprocess_umask_and_gid():
+ """Reconfigure umask and group ID to create output files securely.
+
+ This is passed to subprocess.Popen as preexec_fn, so it is executed in
+ the context of the newly-created process. It:
+
+ * sets the umask of the process so created files aren't world-readable
+ * if an adm group exists in the system, sets that as the process' GID
+ (so that the created file(s) are owned by root:adm)
+ """
+ os.umask(0o037)
+ try:
+ group_id = grp.getgrnam("adm").gr_gid
+ except KeyError:
+ # No adm group, don't set a group
+ pass
+ else:
+ os.setgid(group_id)
+
if outfmt:
LOG.debug("Redirecting %s to %s", o_out, outfmt)
(mode, arg) = outfmt.split(" ", 1)
@@ -632,7 +652,12 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
owith = "wb"
new_fp = open(arg, owith)
elif mode == "|":
- proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
+ proc = subprocess.Popen(
+ arg,
+ shell=True,
+ stdin=subprocess.PIPE,
+ preexec_fn=set_subprocess_umask_and_gid,
+ )
new_fp = proc.stdin
else:
raise TypeError("Invalid type for output format: %s" % outfmt)
@@ -654,7 +679,12 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
owith = "wb"
new_fp = open(arg, owith)
elif mode == "|":
- proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
+ proc = subprocess.Popen(
+ arg,
+ shell=True,
+ stdin=subprocess.PIPE,
+ preexec_fn=set_subprocess_umask_and_gid,
+ )
new_fp = proc.stdin
else:
raise TypeError("Invalid type for error format: %s" % errfmt)
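The practical effect of ``set_subprocess_umask_and_gid`` on the piped
logger's output files: with umask 0o037, a file created with the usual 0o666
request ends up 0o640 (rw-r-----), group-owned by adm when that group exists.
The arithmetic:

    requested_mode = 0o666
    umask = 0o037
    assert requested_mode & ~umask == 0o640  # root writes, group adm reads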
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 36ec728e..94afd60d 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "20.4.1"
+__VERSION__ = "21.1"
_PACKAGED_VERSION = '@@PACKAGED_VERSION@@'
FEATURES = [
diff --git a/doc/examples/cloud-config-ssh-keys.txt b/doc/examples/cloud-config-ssh-keys.txt
index aad8b683..bfe5ab44 100644
--- a/doc/examples/cloud-config-ssh-keys.txt
+++ b/doc/examples/cloud-config-ssh-keys.txt
@@ -42,3 +42,13 @@ ssh_keys:
-----END DSA PRIVATE KEY-----
dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost
+
+# By default, the fingerprints of the authorized keys for the users
+# that cloud-init adds are printed to the console. Setting
+# no_ssh_fingerprints to true suppresses this output.
+no_ssh_fingerprints: false
+
+# By default, (most) ssh host keys are printed to the console. Setting
+# emit_keys_to_console to false suppresses this output.
+ssh:
+ emit_keys_to_console: false
diff --git a/doc/examples/part-handler.txt b/doc/examples/part-handler.txt
index a6e66415..1484e1a0 100644
--- a/doc/examples/part-handler.txt
+++ b/doc/examples/part-handler.txt
@@ -1,5 +1,4 @@
#part-handler
-# vi: syntax=python ts=4
def list_types():
# return a list of mime-types that are handled by this module
diff --git a/doc/man/cloud-init.1 b/doc/man/cloud-init.1
index 9b52dc8d..3fde4148 100644
--- a/doc/man/cloud-init.1
+++ b/doc/man/cloud-init.1
@@ -10,7 +10,7 @@ cloud-init \- Cloud instance initialization
Cloud-init provides a mechanism for cloud instance initialization.
This is done by identifying the cloud platform that is in use, reading
provided cloud metadata and optional vendor and user
-data, and then intializing the instance as requested.
+data, and then initializing the instance as requested.
Generally, this command is not meant to be run directly by
the user. However, some subcommands may be useful for development or
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index ddcb0b31..10e8228f 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -75,6 +75,7 @@ Having trouble? We would like to help!
topics/dir_layout.rst
topics/analyze.rst
topics/docs.rst
+ topics/testing.rst
topics/integration_tests.rst
topics/cloud_tests.rst
diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst
index 8f56a7d2..f58b2b38 100644
--- a/doc/rtd/topics/availability.rst
+++ b/doc/rtd/topics/availability.rst
@@ -55,6 +55,7 @@ environments in the public cloud:
- CloudStack
- AltCloud
- SmartOS
+- UpCloud
Additionally, cloud-init is supported on these private clouds:
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 3d026143..228173d2 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -47,6 +47,7 @@ The following is a list of documents for each supported datasource:
datasources/ovf.rst
datasources/rbxcloud.rst
datasources/smartos.rst
+ datasources/upcloud.rst
datasources/zstack.rst
diff --git a/doc/rtd/topics/datasources/aliyun.rst b/doc/rtd/topics/datasources/aliyun.rst
index 3f4f40ca..587bd4f4 100644
--- a/doc/rtd/topics/datasources/aliyun.rst
+++ b/doc/rtd/topics/datasources/aliyun.rst
@@ -12,6 +12,21 @@ The Alibaba Cloud metadata service is available at the well known url
Alibaba Cloud ECS on `metadata
<https://www.alibabacloud.com/help/zh/faq-detail/49122.htm>`__.
+Configuration
+-------------
+The following configuration can be set for the datasource in system
+configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``).
+
+An example configuration with the default values is provided below:
+
+.. sourcecode:: yaml
+
+ datasource:
+ AliYun:
+ metadata_urls: ["http://100.100.100.200"]
+ timeout: 50
+ max_wait: 120
+
Versions
^^^^^^^^
Like the EC2 metadata service, Alibaba Cloud's metadata service provides
diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst
index a24de34f..325aeeaf 100644
--- a/doc/rtd/topics/datasources/cloudstack.rst
+++ b/doc/rtd/topics/datasources/cloudstack.rst
@@ -46,8 +46,6 @@ An example configuration with the default values is provided below:
CloudStack:
max_wait: 120
timeout: 50
- datasource_list:
- - CloudStack
.. _Apache CloudStack: http://cloudstack.apache.org/
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
index 0ca79102..edb41e2a 100644
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ b/doc/rtd/topics/datasources/nocloud.rst
@@ -117,7 +117,7 @@ yaml formatted data in a file named ``network-config``. If found,
this file will override a ``network-interfaces`` file.
See an example below. Note specifically that this file does not
-have a top level ``network`` key as it it is already assumed to
+have a top level ``network`` key as it is already assumed to
be network configuration based on the filename.
.. code:: yaml
diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst
index b23b4b7c..62d0fc03 100644
--- a/doc/rtd/topics/datasources/openstack.rst
+++ b/doc/rtd/topics/datasources/openstack.rst
@@ -82,4 +82,12 @@ For more general information about how cloud-init handles vendor data,
including how it can be disabled by users on instances, see
:doc:`/topics/vendordata`.
+OpenStack can also be configured to provide 'dynamic vendordata',
+served by the DynamicJSON provider and exposed under a separate
+metadata path, /vendor_data2.json.
+
+Cloud-init looks for a ``cloud-init`` key at the vendor_data2 path; if found,
+its settings are applied after (and hence override) the settings from static
+vendor data. Both sets of vendor data can be overridden by user data.
+
.. vi: textwidth=78
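A sketch of how the dynamic payload becomes vendor data: the
DataSourceOpenStack change earlier in this diff passes the decoded
vendor_data2.json through ``sources.convert_vendordata``, which unwraps the
``cloud-init`` key. Simplified stand-in with a hypothetical payload:

    def convert_vendordata(data):
        # Simplified stand-in for cloudinit.sources.convert_vendordata
        if data is None or isinstance(data, (str, list)):
            return data
        if isinstance(data, dict):
            return convert_vendordata(data.get("cloud-init"))
        raise ValueError("Unknown data type for vendordata: %s" % type(data))

    vendor_data2 = {"cloud-init": "#cloud-config\npackages: [pollinate]\n"}
    assert convert_vendordata(vendor_data2).startswith("#cloud-config")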
diff --git a/doc/rtd/topics/datasources/ovf.rst b/doc/rtd/topics/datasources/ovf.rst
index 6256e624..85b0c377 100644
--- a/doc/rtd/topics/datasources/ovf.rst
+++ b/doc/rtd/topics/datasources/ovf.rst
@@ -13,6 +13,15 @@ source code tree in doc/sources/ovf
Configuration
-------------
+The following configuration can be set for the datasource in system
+configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
+
+The settings that may be configured are:
+
+ * vmware_cust_file_max_wait: the maximum amount of clock time in seconds that
+ should be spent waiting for vmware customization files. (default: 15)
+
+
On VMware platforms, VMTools use is required for OVF datasource configuration
settings as well as vCloud and vSphere admin configuration. Users can change
the VMTools configuration options with the command::
diff --git a/doc/rtd/topics/datasources/upcloud.rst b/doc/rtd/topics/datasources/upcloud.rst
new file mode 100644
index 00000000..0b7a9bb0
--- /dev/null
+++ b/doc/rtd/topics/datasources/upcloud.rst
@@ -0,0 +1,24 @@
+.. _datasource_upcloud:
+
+UpCloud
+=============
+
+The `UpCloud`_ datasource consumes information from UpCloud's `metadata
+service`_. This metadata service serves information about the
+running server via HTTP at the address 169.254.169.254, reachable from
+every DHCP-configured interface. The metadata API endpoints are fully
+described in UpCloud's API documentation at
+`https://developers.upcloud.com/1.3/8-servers/#metadata-service
+<https://developers.upcloud.com/1.3/8-servers/#metadata-service>`_.
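+
+For instance, the metadata can be inspected from the running server itself
+(the exact path shown is an assumption; consult the API documentation linked
+above):
+
+.. code-block:: shell-session
+
+    $ curl http://169.254.169.254/metadata/v1.json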
+
+Providing user-data
+-------------------
+
+When creating a server, user-data is provided by specifying it as
+``user_data`` in the API or via the server creation tool in the control
+panel. User-data is immutable during the server's lifetime and can be
+removed by deleting the server.
+
+.. _UpCloud: https://upcloud.com/
+.. _metadata service: https://upcloud.com/community/tutorials/upcloud-metadata-service/
+
+.. vi: textwidth=78
diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst
index 0d416f32..fb3006fe 100644
--- a/doc/rtd/topics/debugging.rst
+++ b/doc/rtd/topics/debugging.rst
@@ -1,6 +1,6 @@
-********************************
-Testing and debugging cloud-init
-********************************
+********************
+Debugging cloud-init
+********************
Overview
========
diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/topics/examples.rst
index 81860f85..97fd616d 100644
--- a/doc/rtd/topics/examples.rst
+++ b/doc/rtd/topics/examples.rst
@@ -149,8 +149,8 @@ Disk setup
:language: yaml
:linenos:
-Register RedHat Subscription
-============================
+Register Red Hat Subscription
+=============================
.. literalinclude:: ../../examples/cloud-config-rh_subscription.txt
:language: yaml
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
index d03e4caf..fa8aa925 100644
--- a/doc/rtd/topics/format.rst
+++ b/doc/rtd/topics/format.rst
@@ -23,9 +23,11 @@ Using a mime-multi part file, the user can specify more than one type of data.
For example, both a user data script and a cloud-config type could be
specified.
-Supported content-types are listed from the cloud-init subcommand make-mime::
+Supported content-types are listed from the cloud-init subcommand make-mime:
- % cloud-init devel make-mime --list-types
+.. code-block:: shell-session
+
+ $ cloud-init devel make-mime --list-types
cloud-boothook
cloud-config
cloud-config-archive
@@ -47,9 +49,11 @@ The cloud-init subcommand can generate MIME multi-part files: `make-mime`_.
separated by a colon (e.g. ``config.yaml:cloud-config``) and emits a MIME
multipart message to stdout. An example invocation, assuming you have your
cloud config in ``config.yaml`` and a shell script in ``script.sh`` and want
-to store the multipart message in ``user-data``::
+to store the multipart message in ``user-data``:
+
+.. code-block:: shell-session
- % cloud-init devel make-mime -a config.yaml:cloud-config -a script.sh:x-shellscript > user-data
+ $ cloud-init devel make-mime -a config.yaml:cloud-config -a script.sh:x-shellscript > user-data
.. _make-mime: https://github.com/canonical/cloud-init/blob/master/cloudinit/cmd/devel/make_mime.py
@@ -70,7 +74,7 @@ archive.
Example
-------
-::
+.. code-block:: shell-session
$ cat myscript.sh
diff --git a/doc/rtd/topics/integration_tests.rst b/doc/rtd/topics/integration_tests.rst
index aeda326c..6c124ad9 100644
--- a/doc/rtd/topics/integration_tests.rst
+++ b/doc/rtd/topics/integration_tests.rst
@@ -9,11 +9,41 @@ Overview
Integration tests are written using pytest and are located at
``tests/integration_tests``. General design principles
-laid out in :ref:`unit_testing` should be followed for integration tests.
+laid out in :ref:`testing` should be followed for integration tests.
Setup is accomplished via a set of fixtures located in
``tests/integration_tests/conftest.py``.
+Image Selection
+===============
+
+Each integration testing run uses a single image as its basis. This
+image is configured using the ``OS_IMAGE`` variable; see
+:ref:`Configuration` for details of how configuration works.
+
+``OS_IMAGE`` can take two types of value: an Ubuntu series name (e.g.
+"focal"), or an image specification. If an Ubuntu series name is
+given, then the most recent image for that series on the target cloud
+will be used. For other use cases, an image specification is used.
+
+In its simplest form, an image specification can simply be a cloud's
+image ID (e.g. "ami-deadbeef", "ubuntu:focal"). In this case, the
+image so-identified will be used as the basis for this testing run.
+
+This has a drawback, however: as we do not know what OS or release is
+within the image, the integration testing framework will run *all*
+tests against the image in question. If it's a RHEL 8 image, then we
+would expect Ubuntu-specific tests to fail (and vice versa).
+
+To address this, a full image specification can be given. This is of
+the form: ``<image_id>[::<os>[::<release>]]`` where ``image_id`` is a
+cloud's image ID, ``os`` is the OS name, and ``release`` is the OS
+release name. So, for example, Ubuntu 18.04 (Bionic Beaver) on LXD is
+``ubuntu:bionic::ubuntu::bionic`` or RHEL 8 on Amazon is
+``ami-justanexample::rhel::8``. When a full specification is given,
+only tests which are intended for use on that OS and release will be
+executed.
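+
+For example, to run the suite against that RHEL 8 image (a sketch, assuming
+settings are overridden via environment variables as described in
+:ref:`Configuration`):
+
+.. code-block:: shell-session
+
+    $ CLOUD_INIT_OS_IMAGE=ami-justanexample::rhel::8 \
+        pytest tests/integration_tests/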
+
Image Setup
===========
diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst
index 92e81897..17732c2a 100644
--- a/doc/rtd/topics/network-config-format-v1.rst
+++ b/doc/rtd/topics/network-config-format-v1.rst
@@ -414,9 +414,19 @@ Subnet types are one of the following:
- ``dhcp6``: Configure this interface with IPv6 dhcp.
- ``static``: Configure this interface with a static IPv4.
- ``static6``: Configure this interface with a static IPv6 .
+- ``ipv6_dhcpv6-stateful``: Configure this interface with ``dhcp6``.
+- ``ipv6_dhcpv6-stateless``: Configure this interface with SLAAC and DHCP.
+- ``ipv6_slaac``: Configure address with SLAAC.
-When making use of ``dhcp`` types, no additional configuration is needed in
-the subnet dictionary.
+When making use of ``dhcp`` or either of the ``ipv6_dhcpv6`` types,
+no additional configuration is needed in the subnet dictionary.
+
+Using ``ipv6_dhcpv6-stateless`` or ``ipv6_slaac`` allows the IPv6 address to be
+automatically configured with StateLess Address AutoConfiguration (`SLAAC`_).
+SLAAC requires support from the network, so verify that your cloud or network
+offering has support before trying it out. With ``ipv6_dhcpv6-stateless``,
+DHCPv6 is still used to fetch other subnet details such as gateway or DNS
+servers. If you only want to discover the address, use ``ipv6_slaac``.
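+
+**Subnet SLAAC Example** (a sketch; the interface name is illustrative)::
+
+    - type: physical
+      name: interface0
+      subnets:
+        - type: ipv6_slaac
+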
**Subnet DHCP Example**::
@@ -603,4 +613,6 @@ Some more examples to explore the various options available.
- dellstack
type: nameserver
+.. _SLAAC: https://tools.ietf.org/html/rfc4862
+
.. vi: textwidth=78
diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst
index aa17bef5..af65a4ce 100644
--- a/doc/rtd/topics/network-config-format-v2.rst
+++ b/doc/rtd/topics/network-config-format-v2.rst
@@ -8,9 +8,25 @@ version 2 format defined for the `netplan`_ tool. Cloud-init supports
both reading and writing of Version 2; the latter support requires a
distro with `netplan`_ present.
+Netplan Passthrough
+-------------------
+
+On a system with netplan present, cloud-init will pass Version 2 configuration
+through to netplan without modification. On such systems, you do not need to
+limit yourself to the below subset of netplan's configuration format.
+
+.. warning::
+ If you are writing or generating network configuration that may be used on
+ non-netplan systems, you **must** limit yourself to the subset described in
+ this document, or you will see network configuration failures on
+ non-netplan systems.
+
+Version 2 Configuration Format
+------------------------------
+
The ``network`` key has at least two required elements. First
it must include ``version: 2`` and one or more of possible device
-``types``..
+``types``.
Cloud-init will read this format from system config.
For example the following could be present in
@@ -34,9 +50,6 @@ Each type block contains device definitions as a map where the keys (called
"configuration IDs"). Each entry under the ``types`` may include IP and/or
device configuration.
-Cloud-init does not current support ``wifis`` type that is present in native
-`netplan`_.
-
Device configuration IDs
------------------------
@@ -478,6 +491,11 @@ This is a complex example which shows most available features: ::
nameservers:
search: [foo.local, bar.local]
addresses: [8.8.8.8]
+ # static routes
+ routes:
+ - to: 192.0.2.0/24
+ via: 11.0.0.1
+ metric: 3
lom:
match:
driver: ixgbe
@@ -506,11 +524,6 @@ This is a complex example which shows most available features: ::
id: 1
link: id0
dhcp4: yes
- # static routes
- routes:
- - to: 0.0.0.0/0
- via: 11.0.0.1
- metric: 3
-.. _netplan: https://launchpad.net/netplan
+.. _netplan: https://netplan.io
.. vi: textwidth=78
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
index 08db04d8..07cad765 100644
--- a/doc/rtd/topics/network-config.rst
+++ b/doc/rtd/topics/network-config.rst
@@ -144,6 +144,10 @@ The following Datasources optionally provide network configuration:
- `SmartOS JSON Metadata`_
+- :ref:`datasource_upcloud`
+
+ - `UpCloud JSON metadata`_
+
For more information on network configuration formats
.. toctree::
@@ -257,5 +261,6 @@ Example output converting V2 to sysconfig:
.. _DigitalOcean JSON metadata: https://developers.digitalocean.com/documentation/metadata/#network-interfaces-index
.. _OpenStack Metadata Service Network: https://specs.openstack.org/openstack/nova-specs/specs/liberty/implemented/metadata-service-network-info.html
.. _SmartOS JSON Metadata: https://eng.joyent.com/mdata/datadict.html
+.. _UpCloud JSON metadata: https://developers.upcloud.com/1.3/8-servers/#metadata-service
.. vi: textwidth=78
diff --git a/doc/rtd/topics/testing.rst b/doc/rtd/topics/testing.rst
new file mode 100644
index 00000000..5b702bd2
--- /dev/null
+++ b/doc/rtd/topics/testing.rst
@@ -0,0 +1,173 @@
+*******
+Testing
+*******
+
+cloud-init has both unit tests and integration tests. Unit tests can
+be found in-tree alongside the source code, as well as
+at ``tests/unittests``. Integration tests can be found at
+``tests/integration_tests``. Documentation specifically for integration
+tests can be found on the :ref:`integration_tests` page, but
+the guidelines specified below apply to both types of tests.
+
+cloud-init uses `pytest`_ to run its tests, and has tests written both
+as ``unittest.TestCase`` sub-classes and as un-subclassed pytest tests.
+
+Guidelines
+==========
+
+The following guidelines should be followed.
+
+Test Layout
+-----------
+
+* For ease of organisation and greater accessibility for developers not
+ familiar with pytest, all cloud-init unit tests must be contained
+ within test classes
+
+ * Put another way, module-level test functions should not be used
+
+* As all tests are contained within classes, it is acceptable to mix
+ ``TestCase`` test classes and pytest test classes within the same
+ test file
+
+ * These can be easily distinguished by their definition: pytest
+ classes will not use inheritance at all (e.g.
+ `TestGetPackageMirrorInfo`_), whereas ``TestCase`` classes will
+ subclass (indirectly) from ``TestCase`` (e.g.
+ `TestPrependBaseCommands`_)
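+
+A minimal sketch of the two styles side by side (class and test names are
+illustrative):
+
+.. code-block:: python
+
+    import unittest
+
+
+    class TestWithPytest:  # pytest-style: no inheritance
+        def test_true(self):
+            assert True
+
+
+    class TestWithTestCase(unittest.TestCase):  # TestCase-style
+        def test_true(self):
+            self.assertTrue(True)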
+
+``pytest`` Tests
+----------------
+
+* pytest test classes should use `pytest fixtures`_ to share
+ functionality instead of inheritance
+
+* pytest tests should use bare ``assert`` statements, to take advantage
+ of pytest's `assertion introspection`_
+
+ * For ``==`` and other commutative assertions, the expected value
+ should be placed before the value under test:
+ ``assert expected_value == function_under_test()``
+
+
+``pytest`` Version Gotchas
+--------------------------
+
+As we still support Ubuntu 16.04 (Xenial Xerus), we can only use pytest
+features that are available in v2.8.7. This is an inexhaustive list of
+ways in which this may catch you out:
+
+* Support for using ``yield`` in ``pytest.fixture`` functions was only
+ introduced in `pytest 3.0`_. Such functions must instead use the
+ ``pytest.yield_fixture`` decorator.
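+
+  A sketch of such a fixture (``fake_dir`` is illustrative):
+
+  .. code-block:: python
+
+      import pytest
+
+      @pytest.yield_fixture
+      def fake_dir(tmpdir):
+          # Runs as setup; the yielded value is the fixture's value
+          yield tmpdir.strpath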
+
+* Only the following built-in fixtures are available [#fixture-list]_:
+
+ * ``cache``
+ * ``capfd``
+ * ``caplog`` (provided by ``python3-pytest-catchlog`` on xenial)
+ * ``capsys``
+ * ``monkeypatch``
+ * ``pytestconfig``
+ * ``record_xml_property``
+ * ``recwarn``
+ * ``tmpdir_factory``
+ * ``tmpdir``
+
+* On xenial, the objects returned by the ``tmpdir`` fixture cannot be
+ used where paths are required; they are rejected as invalid paths.
+ You must instead use their ``.strpath`` attribute.
+
+ * For example, instead of ``util.write_file(tmpdir.join("some_file"),
+ ...)``, you should write
+ ``util.write_file(tmpdir.join("some_file").strpath, ...)``.
+
+* The `pytest.param`_ function cannot be used. It was introduced in
+ pytest 3.1, which means it is not available on xenial. The more
+ limited mechanism it replaced was removed in pytest 4.0, so is not
+ available in focal or later. The only available alternatives are to
+ write mark-requiring test instances as completely separate tests,
+ without utilising parameterisation, or to apply the mark to the
+ entire parameterized test (and therefore every test instance).
+
+Mocking and Assertions
+----------------------
+
+* Variables/parameter names for ``Mock`` or ``MagicMock`` instances
+ should start with ``m_`` to clearly distinguish them from non-mock
+ variables
+
+ * For example, ``m_readurl`` (which would be a mock for ``readurl``)
+
+* The ``assert_*`` methods that are available on ``Mock`` and
+ ``MagicMock`` objects should be avoided, as typos in these method
+ names may not raise ``AttributeError`` (and so can cause tests to
+ silently pass). An important exception: if a ``Mock`` is
+ `autospecced`_ then misspelled assertion methods *will* raise an
+ ``AttributeError``, so these assertion methods may be used on
+ autospecced ``Mock`` objects.
+
+  For non-autospecced ``Mock`` objects, these substitutions can be used
+  (``m`` is assumed to be a ``Mock``):
+
+ * ``m.assert_any_call(*args, **kwargs)`` => ``assert
+ mock.call(*args, **kwargs) in m.call_args_list``
+ * ``m.assert_called()`` => ``assert 0 != m.call_count``
+ * ``m.assert_called_once()`` => ``assert 1 == m.call_count``
+ * ``m.assert_called_once_with(*args, **kwargs)`` => ``assert
+ [mock.call(*args, **kwargs)] == m.call_args_list``
+ * ``m.assert_called_with(*args, **kwargs)`` => ``assert
+ mock.call(*args, **kwargs) == m.call_args_list[-1]``
+ * ``m.assert_has_calls(call_list, any_order=True)`` => ``for call in
+ call_list: assert call in m.call_args_list``
+
+ * ``m.assert_has_calls(...)`` and ``m.assert_has_calls(...,
+ any_order=False)`` are not easily replicated in a single
+ statement, so their use when appropriate is acceptable.
+
+ * ``m.assert_not_called()`` => ``assert 0 == m.call_count``
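+
+  For example, a sketch of the ``assert_called_once_with`` substitution:
+
+  .. code-block:: python
+
+      from unittest import mock
+
+      m = mock.Mock()
+      m("a", timeout=1)
+      # Instead of m.assert_called_once_with("a", timeout=1):
+      assert [mock.call("a", timeout=1)] == m.call_args_list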
+
+* When there are multiple patch calls in a test file for the module it
+ is testing, it may be desirable to capture the shared string prefix
+ for these patch calls in a module-level variable. If used, such
+ variables should be named ``M_PATH`` or, for datasource tests,
+ ``DS_PATH``.
+
+Test Argument Ordering
+----------------------
+
+* Test arguments should be ordered as follows:
+
+ * ``mock.patch`` arguments. When used as a decorator, ``mock.patch``
+ partially applies its generated ``Mock`` object as the first
+ argument, so these arguments must go first.
+ * ``pytest.mark.parametrize`` arguments, in the order specified to
+ the ``parametrize`` decorator. These arguments are also provided
+ by a decorator, so it's natural that they sit next to the
+ ``mock.patch`` arguments.
+ * Fixture arguments, alphabetically. These are not provided by a
+ decorator, so they are last, and their order has no defined
+ meaning, so we default to alphabetical.
+
+* It follows from this ordering of test arguments (so that we retain
+ the property that arguments left-to-right correspond to decorators
+ bottom-to-top) that test decorators should be ordered as follows:
+
+ * ``pytest.mark.parametrize``
+ * ``mock.patch``
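+
+A sketch putting these rules together (the patched target is arbitrary):
+
+.. code-block:: python
+
+    from unittest import mock
+
+    import pytest
+
+    @pytest.mark.parametrize("expected", [1, 2])
+    @mock.patch("os.getcwd")
+    def test_example(m_getcwd, expected, tmpdir):
+        # mock.patch arg first, then parametrize args, then fixtures
+        m_getcwd.return_value = tmpdir.strpath
+        assert expected in (1, 2)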
+
+.. [#fixture-list] This list of fixtures (with markup) can be
+ reproduced by running::
+
+ py.test-3 --fixtures -q | grep "^[^ -]" | grep -v '\(no\|capturelog\)' | sort | sed 's/.*/* ``\0``/'
+
+ in a xenial lxd container with python3-pytest-catchlog installed.
+
+.. _pytest: https://docs.pytest.org/
+.. _pytest fixtures: https://docs.pytest.org/en/latest/fixture.html
+.. _TestGetPackageMirrorInfo: https://github.com/canonical/cloud-init/blob/42f69f410ab8850c02b1f53dd67c132aa8ef64f5/cloudinit/distros/tests/test_init.py#L15
+.. _TestPrependBaseCommands: https://github.com/canonical/cloud-init/blob/master/cloudinit/tests/test_subp.py#L9
+.. _assertion introspection: https://docs.pytest.org/en/latest/assert.html
+.. _pytest 3.0: https://docs.pytest.org/en/latest/changelog.html#id1093
+.. _pytest.param: https://docs.pytest.org/en/latest/reference.html#pytest-param
+.. _autospecced: https://docs.python.org/3.8/library/unittest.mock.html#autospeccing
diff --git a/integration-requirements.txt b/integration-requirements.txt
index 3648a0f1..95891356 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -1,5 +1,5 @@
# PyPI requirements for cloud-init integration testing
# https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html
#
-pycloudlib @ git+https://github.com/canonical/pycloudlib.git@4b8d2cd5ac6316810ce16d081842da575625ca4f
+pycloudlib @ git+https://github.com/canonical/pycloudlib.git@96b146ee1beb99b8e44e36525e18a9a20e00c3f2
pytest
diff --git a/tests/cloud_tests/testcases/examples/TODO.md b/tests/cloud_tests/testcases/examples/TODO.md
index 8db0e98e..cde699a7 100644
--- a/tests/cloud_tests/testcases/examples/TODO.md
+++ b/tests/cloud_tests/testcases/examples/TODO.md
@@ -6,7 +6,7 @@ Below lists each of the issing examples and why it is not currently added.
- Puppet (takes > 60 seconds to run)
- Manage resolve.conf (lxd backend overrides changes)
- Adding a yum repository (need centos system)
- - Register RedHat Subscription (need centos system + subscription)
+ - Register Red Hat Subscription (need centos system + subscription)
- Adjust mount points mounted (need multiple disks)
- Call a url when finished (need end point)
- Reboot/poweroff when finished (how to test)
diff --git a/tests/integration_tests/__init__.py b/tests/integration_tests/__init__.py
new file mode 100644
index 00000000..e1d4cd28
--- /dev/null
+++ b/tests/integration_tests/__init__.py
@@ -0,0 +1,12 @@
+import random
+
+
+def random_mac_address() -> str:
+ """Generate a random MAC address.
+
+    The MAC address will have a 1 in the second-least-significant bit of its
+    first octet, marking it as a locally administered address.
+ """
+ return "02:00:00:%02x:%02x:%02x" % (random.randint(0, 255),
+ random.randint(0, 255),
+ random.randint(0, 255))
diff --git a/tests/integration_tests/bugs/test_gh570.py b/tests/integration_tests/bugs/test_gh570.py
new file mode 100644
index 00000000..534cfb9a
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh570.py
@@ -0,0 +1,39 @@
+"""Integration test for #570.
+
+Test that we can add optional vendor-data to the seedfrom file in a
+NoCloud environment.
+"""
+
+from tests.integration_tests.instances import IntegrationInstance
+import pytest
+
+VENDOR_DATA = """\
+#cloud-config
+runcmd:
+ - touch /var/tmp/seeded_vendordata_test_file
+"""
+
+
+# Only running on LXD because we need NoCloud for this test
+@pytest.mark.sru_2020_11
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+def test_nocloud_seedfrom_vendordata(client: IntegrationInstance):
+ seed_dir = '/var/tmp/test_seed_dir'
+ result = client.execute(
+ "mkdir {seed_dir} && "
+ "touch {seed_dir}/user-data && "
+ "touch {seed_dir}/meta-data && "
+ "echo 'seedfrom: {seed_dir}/' > "
+ "/var/lib/cloud/seed/nocloud-net/meta-data".format(seed_dir=seed_dir)
+ )
+ assert result.return_code == 0
+
+ client.write_to_file(
+ '{}/vendor-data'.format(seed_dir),
+ VENDOR_DATA,
+ )
+ client.execute('cloud-init clean --logs')
+ client.restart()
+ assert client.execute('cloud-init status').ok
+ assert 'seeded_vendordata_test_file' in client.execute('ls /var/tmp')
diff --git a/tests/integration_tests/bugs/test_gh626.py b/tests/integration_tests/bugs/test_gh626.py
new file mode 100644
index 00000000..dba01b34
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh626.py
@@ -0,0 +1,47 @@
+"""Integration test for gh-626.
+
+Ensure that if wakeonlan is specified in the network config, it is rendered
+into the /etc/network/interfaces or netplan config.
+"""
+
+import pytest
+import yaml
+
+from tests.integration_tests import random_mac_address
+from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.instances import IntegrationInstance
+
+
+MAC_ADDRESS = random_mac_address()
+NETWORK_CONFIG = """\
+version: 2
+ethernets:
+ eth0:
+ dhcp4: true
+ wakeonlan: true
+ match:
+ macaddress: {}
+""".format(MAC_ADDRESS)
+
+EXPECTED_ENI_END = """\
+iface eth0 inet dhcp
+ ethernet-wol g"""
+
+
+@pytest.mark.sru_2020_11
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+@pytest.mark.lxd_config_dict({
+ 'user.network-config': NETWORK_CONFIG,
+ "volatile.eth0.hwaddr": MAC_ADDRESS,
+})
+def test_wakeonlan(client: IntegrationInstance):
+ if ImageSpecification.from_os_image().release == 'xenial':
+ eni = client.execute('cat /etc/network/interfaces.d/50-cloud-init.cfg')
+ assert eni.endswith(EXPECTED_ENI_END)
+ return
+
+ netplan_cfg = client.execute('cat /etc/netplan/50-cloud-init.yaml')
+ netplan_yaml = yaml.safe_load(netplan_cfg)
+ assert 'wakeonlan' in netplan_yaml['network']['ethernets']['eth0']
+ assert netplan_yaml['network']['ethernets']['eth0']['wakeonlan'] is True
diff --git a/tests/integration_tests/bugs/test_gh632.py b/tests/integration_tests/bugs/test_gh632.py
new file mode 100644
index 00000000..3c1f9347
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh632.py
@@ -0,0 +1,33 @@
+"""Integration test for gh-632.
+
+Verify that if cloud-init is using DataSourceRbxCloud, there is
+no traceback if the metadata disk cannot be found.
+"""
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+
+# With some datasource hacking, we can run this on a NoCloud instance
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+@pytest.mark.sru_2020_11
+def test_datasource_rbx_no_stacktrace(client: IntegrationInstance):
+ client.write_to_file(
+ '/etc/cloud/cloud.cfg.d/90_dpkg.cfg',
+ 'datasource_list: [ RbxCloud, NoCloud ]\n',
+ )
+ client.write_to_file(
+ '/etc/cloud/ds-identify.cfg',
+ 'policy: enabled\n',
+ )
+ client.execute('cloud-init clean --logs')
+ client.restart()
+
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'WARNING' not in log
+ assert 'Traceback' not in log
+ assert 'Failed to load metadata and userdata' not in log
+ assert ("Getting data from <class 'cloudinit.sources.DataSourceRbxCloud."
+ "DataSourceRbxCloud'> failed") not in log
diff --git a/tests/integration_tests/bugs/test_gh668.py b/tests/integration_tests/bugs/test_gh668.py
new file mode 100644
index 00000000..ce57052e
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh668.py
@@ -0,0 +1,43 @@
+"""Integration test for gh-668.
+
+Ensure that a static route to a host is working correctly.
+The original problem is specific to the ENI renderer, but this test is
+suitable for all network configuration outputs.
+"""
+
+import pytest
+
+from tests.integration_tests import random_mac_address
+from tests.integration_tests.instances import IntegrationInstance
+
+
+DESTINATION_IP = "172.16.0.10"
+GATEWAY_IP = "10.0.0.100"
+MAC_ADDRESS = random_mac_address()
+
+NETWORK_CONFIG = """\
+version: 2
+ethernets:
+ eth0:
+ addresses: [10.0.0.10/8]
+ dhcp4: false
+ routes:
+ - to: {}/32
+ via: {}
+ match:
+ macaddress: {}
+""".format(DESTINATION_IP, GATEWAY_IP, MAC_ADDRESS)
+
+EXPECTED_ROUTE = "{} via {}".format(DESTINATION_IP, GATEWAY_IP)
+
+
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+@pytest.mark.lxd_config_dict({
+ "user.network-config": NETWORK_CONFIG,
+ "volatile.eth0.hwaddr": MAC_ADDRESS,
+})
+@pytest.mark.lxd_use_exec
+def test_static_route_to_host(client: IntegrationInstance):
+ route = client.execute("ip route | grep {}".format(DESTINATION_IP))
+ assert route.startswith(EXPECTED_ROUTE)
diff --git a/tests/integration_tests/bugs/test_gh671.py b/tests/integration_tests/bugs/test_gh671.py
new file mode 100644
index 00000000..5e90cdda
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh671.py
@@ -0,0 +1,55 @@
+"""Integration test for gh-671.
+
+Verify that, on Azure, if a default user and password are specified through
+the Azure API, a change in the default password overwrites the old password.
+"""
+
+import crypt
+
+import pytest
+
+from tests.integration_tests.clouds import IntegrationCloud
+
+OLD_PASSWORD = 'DoIM33tTheComplexityRequirements!??'
+NEW_PASSWORD = 'DoIM33tTheComplexityRequirementsNow!??'
+
+
+def _check_password(instance, unhashed_password):
+ shadow_password = instance.execute('getent shadow ubuntu').split(':')[1]
+ salt = shadow_password.rsplit('$', 1)[0]
+ hashed_password = crypt.crypt(unhashed_password, salt)
+ assert shadow_password == hashed_password
+
+
+@pytest.mark.azure
+@pytest.mark.sru_2020_11
+def test_update_default_password(setup_image, session_cloud: IntegrationCloud):
+ os_profile = {
+ 'os_profile': {
+ 'admin_password': '',
+ 'linux_configuration': {
+ 'disable_password_authentication': False
+ }
+ }
+ }
+ os_profile['os_profile']['admin_password'] = OLD_PASSWORD
+ instance1 = session_cloud.launch(launch_kwargs={'vm_params': os_profile})
+
+ _check_password(instance1, OLD_PASSWORD)
+
+ snapshot_id = instance1.cloud.cloud_instance.snapshot(
+ instance1.instance,
+ delete_provisioned_user=False
+ )
+
+ os_profile['os_profile']['admin_password'] = NEW_PASSWORD
+ try:
+ with session_cloud.launch(launch_kwargs={
+ 'image_id': snapshot_id,
+ 'vm_params': os_profile,
+ }) as instance2:
+ _check_password(instance2, NEW_PASSWORD)
+ finally:
+ session_cloud.cloud_instance.delete_image(snapshot_id)
+ instance1.destroy()
diff --git a/tests/integration_tests/bugs/test_lp1813396.py b/tests/integration_tests/bugs/test_lp1813396.py
new file mode 100644
index 00000000..7ad0e809
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1813396.py
@@ -0,0 +1,34 @@
+"""Integration test for lp-1813396
+
+Ensure gpg is called with no tty flag.
+"""
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.log_utils import verify_ordered_items_in_text
+
+
+USER_DATA = """\
+#cloud-config
+apt:
+ sources:
+ cloudinit:
+ source: 'deb [arch=amd64] http://ppa.launchpad.net/cloud-init-dev/daily/ubuntu focal main'
+ keyserver: keyserver.ubuntu.com
+ keyid: E4D304DF
+""" # noqa: E501
+
+
+@pytest.mark.sru_2020_11
+@pytest.mark.user_data(USER_DATA)
+def test_gpg_no_tty(client: IntegrationInstance):
+ log = client.read_from_file('/var/log/cloud-init.log')
+ to_verify = [
+ "Running command ['gpg', '--no-tty', "
+ "'--keyserver=keyserver.ubuntu.com', '--recv-keys', 'E4D304DF'] "
+ "with allowed return codes [0] (shell=False, capture=True)",
+ "Imported key 'E4D304DF' from keyserver 'keyserver.ubuntu.com'",
+ "finish: modules-config/config-apt-configure: SUCCESS",
+ ]
+ verify_ordered_items_in_text(to_verify, log)
diff --git a/tests/integration_tests/bugs/test_lp1835584.py b/tests/integration_tests/bugs/test_lp1835584.py
new file mode 100644
index 00000000..660d2a2a
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1835584.py
@@ -0,0 +1,104 @@
+""" Integration test for LP #1835584
+
+Upstream linux kernels prior to 4.15 provide the DMI product_uuid in
+uppercase. More recent kernels switched to lowercase for DMI product_uuid.
+The Azure datasource uses this product_uuid as the instance-id for
+cloud-init.
+
+With the linux-azure-fips kernel installed in PRO FIPS images, the product
+UUID is uppercase, whereas the linux-azure cloud-optimized kernel reports
+the UUID as lowercase.
+
+In cases where product_uuid changes case, ensure cloud-init doesn't
+recreate ssh hostkeys across reboot (due to detecting an instance_id change).
+
+This currently only affects linux-azure-fips -> linux-azure on Bionic.
+This test won't run on Xenial because both linux-azure-fips and linux-azure
+report uppercase product_uuids.
+
+The test will launch a specific Bionic Ubuntu PRO FIPS image which has a
+linux-azure-fips kernel known to report product_uuid as uppercase, then
+upgrade and reboot into the linux-azure kernel, which is known to report
+product_uuid as lowercase.
+
+Across the reboot, assert that we didn't re-run config_ssh by virtue of
+seeing only one semaphore creation log entry of type:
+
+ Writing to /var/lib/cloud/instances/<UUID>/sem/config_ssh -
+
+https://bugs.launchpad.net/cloud-init/+bug/1835584
+"""
+import re
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationAzureInstance
+from tests.integration_tests.clouds import (
+ ImageSpecification, IntegrationCloud
+)
+from tests.integration_tests.conftest import get_validated_source
+
+
+IMG_AZURE_UBUNTU_PRO_FIPS_BIONIC = (
+ "Canonical:0001-com-ubuntu-pro-bionic-fips:pro-fips-18_04:18.04.202010201"
+)
+
+
+def _check_iid_insensitive_across_kernel_upgrade(
+ instance: IntegrationAzureInstance
+):
+ uuid = instance.read_from_file("/sys/class/dmi/id/product_uuid")
+ assert uuid.isupper(), (
+ "Expected uppercase UUID on Ubuntu FIPS image {}".format(
+ uuid
+ )
+ )
+ orig_kernel = instance.execute("uname -r").strip()
+ assert "azure-fips" in orig_kernel
+ result = instance.execute("apt-get update")
+ # Install a 5.4+ kernel which provides lowercase product_uuid
+ result = instance.execute("apt-get install linux-azure --assume-yes")
+ if not result.ok:
+ pytest.fail("Unable to install linux-azure kernel: {}".format(result))
+ instance.restart()
+ new_kernel = instance.execute("uname -r").strip()
+ assert orig_kernel != new_kernel
+ assert "azure-fips" not in new_kernel
+ assert "azure" in new_kernel
+ new_uuid = instance.read_from_file("/sys/class/dmi/id/product_uuid")
+ assert (
+ uuid.lower() == new_uuid
+ ), "Expected UUID on linux-azure to be lowercase of FIPS: {}".format(uuid)
+ log = instance.read_from_file("/var/log/cloud-init.log")
+ RE_CONFIG_SSH_SEMAPHORE = r"Writing.*sem/config_ssh "
+ ssh_runs = len(re.findall(RE_CONFIG_SSH_SEMAPHORE, log))
+ assert 1 == ssh_runs, "config_ssh ran too many times {}".format(ssh_runs)
+
+
+@pytest.mark.azure
+@pytest.mark.sru_next
+def test_azure_kernel_upgrade_case_insensitive_uuid(
+ session_cloud: IntegrationCloud
+):
+ cfg_image_spec = ImageSpecification.from_os_image()
+ if (cfg_image_spec.os, cfg_image_spec.release) != ("ubuntu", "bionic"):
+ pytest.skip(
+ "Test only supports ubuntu:bionic not {0.os}:{0.release}".format(
+ cfg_image_spec
+ )
+ )
+ source = get_validated_source(session_cloud)
+ if not source.installs_new_version():
+ pytest.skip(
+ "Provide CLOUD_INIT_SOURCE to install expected working cloud-init"
+ )
+ image_id = IMG_AZURE_UBUNTU_PRO_FIPS_BIONIC
+ with session_cloud.launch(
+ launch_kwargs={"image_id": image_id}
+ ) as instance:
+ # We can't use setup_image fixture here because we want to avoid
+ # taking a snapshot or cleaning the booted machine after cloud-init
+ # upgrade.
+ instance.install_new_cloud_init(
+ source, take_snapshot=False, clean=False
+ )
+ _check_iid_insensitive_across_kernel_upgrade(instance)
diff --git a/tests/integration_tests/bugs/test_lp1898997.py b/tests/integration_tests/bugs/test_lp1898997.py
new file mode 100644
index 00000000..bde93d06
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1898997.py
@@ -0,0 +1,73 @@
+"""Integration test for LP: #1898997
+
+cloud-init was incorrectly excluding Open vSwitch bridge members from its list
+of interfaces. This meant that instances which had only one interface which
+was in an Open vSwitch bridge would not boot correctly: cloud-init would not
+find the expected physical interfaces, so would not apply network config.
+
+This test checks that cloud-init believes it has successfully applied the
+network configuration, and confirms that the bridge can be used to ping the
+default gateway.
+"""
+import pytest
+from tests.integration_tests import random_mac_address
+
+MAC_ADDRESS = random_mac_address()
+
+
+NETWORK_CONFIG = """\
+bridges:
+ ovs-br:
+ dhcp4: true
+ interfaces:
+ - enp5s0
+ macaddress: 52:54:00:d9:08:1c
+ mtu: 1500
+ openvswitch: {{}}
+ethernets:
+ enp5s0:
+ mtu: 1500
+ set-name: enp5s0
+ match:
+ macaddress: {}
+version: 2
+""".format(MAC_ADDRESS)
+
+
+@pytest.mark.lxd_config_dict({
+ "user.network-config": NETWORK_CONFIG,
+ "volatile.eth0.hwaddr": MAC_ADDRESS,
+})
+@pytest.mark.lxd_vm
+@pytest.mark.lxd_use_exec
+@pytest.mark.not_bionic
+@pytest.mark.not_xenial
+@pytest.mark.sru_2020_11
+@pytest.mark.ubuntu
+class TestInterfaceListingWithOpenvSwitch:
+ def test_ovs_member_interfaces_not_excluded(self, client):
+        # We need to install openvswitch for our provided network
+        # configuration to apply (on next boot), so bring up DHCP on our
+        # default interface to fetch it
+ client.execute("dhclient enp5s0")
+ client.execute("apt update -qqy")
+ client.execute("apt-get install -qqy openvswitch-switch")
+
+ # Now our networking config should successfully apply on a clean reboot
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+ cloudinit_output = client.read_from_file("/var/log/cloud-init.log")
+
+ # Confirm that the network configuration was applied successfully
+ assert "WARN" not in cloudinit_output
+ # Confirm that the applied network config created the OVS bridge
+ assert "ovs-br" in client.execute("ip addr")
+
+ # Test that we can ping our gateway using our bridge
+ gateway = client.execute(
+ "ip -4 route show default | awk '{ print $3 }'"
+ )
+ ping_result = client.execute(
+ "ping -c 1 -W 1 -I ovs-br {}".format(gateway)
+ )
+ assert ping_result.ok
diff --git a/tests/integration_tests/bugs/test_lp1900837.py b/tests/integration_tests/bugs/test_lp1900837.py
index 3fe7d0d0..fcc2b751 100644
--- a/tests/integration_tests/bugs/test_lp1900837.py
+++ b/tests/integration_tests/bugs/test_lp1900837.py
@@ -22,7 +22,8 @@ class TestLogPermissionsNotResetOnReboot:
assert "600" == _get_log_perms(client)
# Reboot
- client.instance.restart()
+ client.restart()
+ assert client.execute('cloud-init status').ok
# Check that permissions are not reset on reboot
assert "600" == _get_log_perms(client)
diff --git a/tests/integration_tests/bugs/test_lp1901011.py b/tests/integration_tests/bugs/test_lp1901011.py
new file mode 100644
index 00000000..2b47f0a8
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1901011.py
@@ -0,0 +1,58 @@
+"""Integration test for LP: #1901011
+
+Ensure an ephemeral disk exists after boot.
+
+See https://github.com/canonical/cloud-init/pull/800
+"""
+import pytest
+
+from tests.integration_tests.clouds import IntegrationCloud
+
+
+@pytest.mark.azure
+@pytest.mark.parametrize('instance_type,is_ephemeral', [
+ ('Standard_DS1_v2', True),
+ ('Standard_D2s_v4', False),
+])
+def test_ephemeral(instance_type, is_ephemeral,
+ session_cloud: IntegrationCloud, setup_image):
+ if is_ephemeral:
+ expected_log = (
+ "Ephemeral resource disk '/dev/disk/cloud/azure_resource' exists. "
+ "Merging default Azure cloud ephemeral disk configs."
+ )
+ else:
+ expected_log = (
+ "Ephemeral resource disk '/dev/disk/cloud/azure_resource' does "
+ "not exist. Not merging default Azure cloud ephemeral disk "
+ "configs."
+ )
+
+ with session_cloud.launch(
+ launch_kwargs={'instance_type': instance_type}
+ ) as client:
+ # Verify log file
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert expected_log in log
+
+ # Verify devices
+ dev_links = client.execute('ls /dev/disk/cloud')
+ assert 'azure_root' in dev_links
+ assert 'azure_root-part1' in dev_links
+ if is_ephemeral:
+ assert 'azure_resource' in dev_links
+ assert 'azure_resource-part1' in dev_links
+
+ # Verify mounts
+ blks = client.execute('lsblk -pPo NAME,TYPE,MOUNTPOINT')
+ root_device = client.execute(
+ 'realpath /dev/disk/cloud/azure_root-part1'
+ )
+ assert 'NAME="{}" TYPE="part" MOUNTPOINT="/"'.format(
+ root_device) in blks
+ if is_ephemeral:
+ ephemeral_device = client.execute(
+ 'realpath /dev/disk/cloud/azure_resource-part1'
+ )
+ assert 'NAME="{}" TYPE="part" MOUNTPOINT="/mnt"'.format(
+ ephemeral_device) in blks
diff --git a/tests/integration_tests/bugs/test_lp1910835.py b/tests/integration_tests/bugs/test_lp1910835.py
new file mode 100644
index 00000000..87f92d5e
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1910835.py
@@ -0,0 +1,66 @@
+"""Integration test for LP: #1910835.
+
+If users do not provide an SSH key and instead ask Azure to generate a key for
+them, the key material available in the IMDS may include CRLF sequences. Prior
+to e56b55452549cb037da0a4165154ffa494e9678a, the Azure datasource handled keys
+via a certificate, the tooling for which removed these sequences. This test
+ensures that cloud-init does not regress support for this Azure behaviour.
+
+This test provides the SSH key configured for tests to the instance in two
+ways: firstly, with CRLFs to mimic the generated keys, via the Azure API;
+secondly, as user-data in unmodified form. This means that even on systems
+which exhibit the bug fetching the platform's metadata, we can SSH into the SUT
+to confirm this (instead of having to assert SSH failure; there are lots of
+reasons SSH might fail).
+
+Once SSH'd in, we check that the two keys in .ssh/authorized_keys have the same
+material: if the Azure datasource has removed the CRLFs correctly, then they
+will match.
+"""
+import pytest
+
+
+USER_DATA_TMPL = """\
+#cloud-config
+ssh_authorized_keys:
+ - {}"""
+
+
+@pytest.mark.sru_2021_01
+@pytest.mark.azure
+def test_crlf_in_azure_metadata_ssh_keys(session_cloud, setup_image):
+ authorized_keys_path = "/home/{}/.ssh/authorized_keys".format(
+ session_cloud.cloud_instance.username
+ )
+ # Pass in user-data to allow us to access the instance when the normal
+ # path fails
+ key_data = session_cloud.cloud_instance.key_pair.public_key_content
+ user_data = USER_DATA_TMPL.format(key_data)
+ # Throw a CRLF into the otherwise good key data, to emulate Azure's
+ # behaviour for generated keys
+ key_data = key_data[:20] + "\r\n" + key_data[20:]
+ vm_params = {
+ "os_profile": {
+ "linux_configuration": {
+ "ssh": {
+ "public_keys": [
+ {"path": authorized_keys_path, "key_data": key_data}
+ ]
+ }
+ }
+ }
+ }
+ with session_cloud.launch(
+ launch_kwargs={"vm_params": vm_params, "user_data": user_data}
+ ) as client:
+ authorized_keys = (
+ client.read_from_file(authorized_keys_path).strip().splitlines()
+ )
+ # We expect one key from the cloud, one from user-data
+ assert 2 == len(authorized_keys)
+ # And those two keys should be the same, except for a possible key
+ # comment, which Azure strips out
+ assert (
+ authorized_keys[0].rsplit(" ")[:2]
+ == authorized_keys[1].split(" ")[:2]
+ )
diff --git a/tests/integration_tests/bugs/test_lp1912844.py b/tests/integration_tests/bugs/test_lp1912844.py
new file mode 100644
index 00000000..efafae50
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1912844.py
@@ -0,0 +1,103 @@
+"""Integration test for LP: #1912844
+
+cloud-init should ignore OVS-internal interfaces when performing its own
+interface determination: these interfaces are handled fully by OVS, so
+cloud-init should never need to touch them.
+
+This test is a semi-synthetic reproducer for the bug. It uses a similar
+network configuration, tweaked slightly to DHCP in a way that will succeed even
+on "failed" boots. The exact bug doesn't reproduce with the NoCloud
+datasource, because it runs at init-local time (whereas the MAAS datasource,
+from the report, runs only at init (network) time): this means that the
+networking code runs before OVS creates its interfaces (which happens after
+init-local but, of course, before networking is up), and so doesn't generate
+the traceback that they cause. We work around this by calling
+``get_interfaces_by_mac`` directly in the test code.
+"""
+import pytest
+
+from tests.integration_tests import random_mac_address
+
+MAC_ADDRESS = random_mac_address()
+
+NETWORK_CONFIG = """\
+bonds:
+ bond0:
+ interfaces:
+ - enp5s0
+ macaddress: {0}
+ mtu: 1500
+bridges:
+ ovs-br:
+ interfaces:
+ - bond0
+ macaddress: {0}
+ mtu: 1500
+ openvswitch: {{}}
+ dhcp4: true
+ethernets:
+ enp5s0:
+ mtu: 1500
+ set-name: enp5s0
+ match:
+ macaddress: {0}
+version: 2
+vlans:
+ ovs-br.100:
+ id: 100
+ link: ovs-br
+ mtu: 1500
+ ovs-br.200:
+ id: 200
+ link: ovs-br
+ mtu: 1500
+""".format(MAC_ADDRESS)
+
+
+SETUP_USER_DATA = """\
+#cloud-config
+packages:
+- openvswitch-switch
+"""
+
+
+@pytest.fixture
+def ovs_enabled_session_cloud(session_cloud):
+ """A session_cloud wrapper, to use an OVS-enabled image for tests.
+
+    This implementation is complicated by wanting to use ``session_cloud``'s
+    snapshot cleanup/retention logic, to avoid having to reimplement that
+    here.
+ """
+ old_snapshot_id = session_cloud.snapshot_id
+ with session_cloud.launch(
+ user_data=SETUP_USER_DATA,
+ ) as instance:
+ instance.instance.clean()
+ session_cloud.snapshot_id = instance.snapshot()
+
+ yield session_cloud
+
+ try:
+ session_cloud.delete_snapshot()
+ finally:
+ session_cloud.snapshot_id = old_snapshot_id
+
+
+@pytest.mark.lxd_vm
+def test_get_interfaces_by_mac_doesnt_traceback(ovs_enabled_session_cloud):
+ """Launch our OVS-enabled image and confirm the bug doesn't reproduce."""
+ launch_kwargs = {
+ "config_dict": {
+ "user.network-config": NETWORK_CONFIG,
+ "volatile.eth0.hwaddr": MAC_ADDRESS,
+ },
+ }
+ with ovs_enabled_session_cloud.launch(
+ launch_kwargs=launch_kwargs,
+ ) as client:
+ result = client.execute(
+ "python3 -c"
+ "'from cloudinit.net import get_interfaces_by_mac;"
+ "get_interfaces_by_mac()'"
+ )
+ assert result.ok
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index 88ac4408..a6026309 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -1,12 +1,22 @@
# This file is part of cloud-init. See LICENSE file for license information.
from abc import ABC, abstractmethod
import logging
-
-from pycloudlib import EC2, GCE, Azure, OCI, LXDContainer, LXDVirtualMachine
+import os.path
+from uuid import UUID
+
+from pycloudlib import (
+ EC2,
+ GCE,
+ Azure,
+ OCI,
+ LXDContainer,
+ LXDVirtualMachine,
+ Openstack,
+)
from pycloudlib.lxd.instance import LXDInstance
import cloudinit
-from cloudinit.subp import subp
+from cloudinit.subp import subp, ProcessExecutionError
from tests.integration_tests import integration_settings
from tests.integration_tests.instances import (
IntegrationEc2Instance,
@@ -25,6 +35,65 @@ except ImportError:
log = logging.getLogger('integration_testing')
+def _get_ubuntu_series() -> list:
+ """Use distro-info-data's ubuntu.csv to get a list of Ubuntu series"""
+ out = ""
+ try:
+ out, _err = subp(["ubuntu-distro-info", "-a"])
+ except ProcessExecutionError:
+ log.info(
+ "ubuntu-distro-info (from the distro-info package) must be"
+ " installed to guess Ubuntu os/release"
+ )
+ return out.splitlines()
+
+
+class ImageSpecification:
+ """A specification of an image to launch for testing.
+
+    If either ``os`` or ``release`` is not specified, an attempt will be
+ made to infer the correct values for these on instantiation.
+
+ :param image_id:
+ The image identifier used by the rest of the codebase to launch this
+ image.
+ :param os:
+ An optional string describing the operating system this image is for
+ (e.g. "ubuntu", "rhel", "freebsd").
+ :param release:
+        An optional string describing the operating system release (e.g.
+ "focal", "8"; the exact values here will depend on the OS).
+ """
+
+ def __init__(
+ self,
+ image_id: str,
+ os: "Optional[str]" = None,
+ release: "Optional[str]" = None,
+ ):
+ if image_id in _get_ubuntu_series():
+ if os is None:
+ os = "ubuntu"
+ if release is None:
+ release = image_id
+
+ self.image_id = image_id
+ self.os = os
+ self.release = release
+ log.info(
+ "Detected image: image_id=%s os=%s release=%s",
+ self.image_id,
+ self.os,
+ self.release,
+ )
+
+ @classmethod
+ def from_os_image(cls):
+ """Return an ImageSpecification for integration_settings.OS_IMAGE."""
+ parts = integration_settings.OS_IMAGE.split("::", 2)
+ return cls(*parts)
+
+
class IntegrationCloud(ABC):
datasource = None # type: Optional[str]
integration_instance_cls = IntegrationInstance
@@ -32,7 +101,23 @@ class IntegrationCloud(ABC):
def __init__(self, settings=integration_settings):
self.settings = settings
self.cloud_instance = self._get_cloud_instance()
- self.image_id = self._get_initial_image()
+ if settings.PUBLIC_SSH_KEY is not None:
+ # If we have a non-default key, use it.
+ self.cloud_instance.use_key(
+ settings.PUBLIC_SSH_KEY, name=settings.KEYPAIR_NAME
+ )
+ elif settings.KEYPAIR_NAME is not None:
+ # Even if we're using the default key, it may still have a
+ # different name in the clouds, so we need to set it separately.
+ self.cloud_instance.key_pair.name = settings.KEYPAIR_NAME
+ self._released_image_id = self._get_initial_image()
+ self.snapshot_id = None
+
+ @property
+ def image_id(self):
+ if self.snapshot_id:
+ return self.snapshot_id
+ return self._released_image_id
def emit_settings_to_log(self) -> None:
log.info(
@@ -50,21 +135,20 @@ class IntegrationCloud(ABC):
raise NotImplementedError
def _get_initial_image(self):
- image_id = self.settings.OS_IMAGE
+ image = ImageSpecification.from_os_image()
try:
- image_id = self.cloud_instance.released_image(
- self.settings.OS_IMAGE)
+ return self.cloud_instance.released_image(image.image_id)
except (ValueError, IndexError):
- pass
- return image_id
+ return image.image_id
def _perform_launch(self, launch_kwargs):
pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs)
- pycloudlib_instance.wait(raise_on_cloudinit_failure=False)
return pycloudlib_instance
def launch(self, user_data=None, launch_kwargs=None,
settings=integration_settings):
+ if launch_kwargs is None:
+ launch_kwargs = {}
if self.settings.EXISTING_INSTANCE_ID:
log.info(
'Not launching instance due to EXISTING_INSTANCE_ID. '
@@ -76,20 +160,26 @@ class IntegrationCloud(ABC):
kwargs = {
'image_id': self.image_id,
'user_data': user_data,
- 'wait': False,
}
- if launch_kwargs:
- kwargs.update(launch_kwargs)
+ kwargs.update(launch_kwargs)
log.info(
- "Launching instance with launch_kwargs:\n{}".format(
- "\n".join("{}={}".format(*item) for item in kwargs.items())
- )
+ "Launching instance with launch_kwargs:\n%s",
+ "\n".join("{}={}".format(*item) for item in kwargs.items())
)
pycloudlib_instance = self._perform_launch(kwargs)
-
log.info('Launched instance: %s', pycloudlib_instance)
- return self.get_instance(pycloudlib_instance, settings)
+ instance = self.get_instance(pycloudlib_instance, settings)
+ if kwargs.get('wait', True):
+ # If we aren't waiting, we can't rely on command execution here
+ log.info(
+ 'cloud-init version: %s',
+ instance.execute("cloud-init --version")
+ )
+ serial = instance.execute("grep serial /etc/cloud/build.info")
+ if serial:
+ log.info('image serial: %s', serial.split()[1])
+ return instance
def get_instance(self, cloud_instance, settings=integration_settings):
return self.integration_instance_cls(self, cloud_instance, settings)
@@ -100,6 +190,19 @@ class IntegrationCloud(ABC):
def snapshot(self, instance):
return self.cloud_instance.snapshot(instance, clean=True)
+ def delete_snapshot(self):
+ if self.snapshot_id:
+ if self.settings.KEEP_IMAGE:
+ log.info(
+ 'NOT deleting snapshot image created for this testrun '
+ 'because KEEP_IMAGE is True: %s', self.snapshot_id)
+ else:
+ log.info(
+ 'Deleting snapshot image created for this testrun: %s',
+ self.snapshot_id
+ )
+ self.cloud_instance.delete_image(self.snapshot_id)
+
class Ec2Cloud(IntegrationCloud):
datasource = 'ec2'
@@ -116,9 +219,6 @@ class GceCloud(IntegrationCloud):
def _get_cloud_instance(self):
return GCE(
tag='gce-integration-test',
- project=self.settings.GCE_PROJECT,
- region=self.settings.GCE_REGION,
- zone=self.settings.GCE_ZONE,
)
@@ -130,7 +230,14 @@ class AzureCloud(IntegrationCloud):
return Azure(tag='azure-integration-test')
def destroy(self):
- self.cloud_instance.delete_resource_group()
+ if self.settings.KEEP_INSTANCE:
+ log.info(
+ 'NOT deleting resource group because KEEP_INSTANCE is true '
+ 'and deleting resource group would also delete instance. '
+ 'Instance and resource group must both be manually deleted.'
+ )
+ else:
+ self.cloud_instance.delete_resource_group()
class OciCloud(IntegrationCloud):
@@ -139,8 +246,7 @@ class OciCloud(IntegrationCloud):
def _get_cloud_instance(self):
return OCI(
- tag='oci-integration-test',
- compartment_id=self.settings.OCI_COMPARTMENT_ID
+ tag='oci-integration-test'
)
@@ -148,6 +254,7 @@ class _LxdIntegrationCloud(IntegrationCloud):
integration_instance_cls = IntegrationLxdInstance
def _get_cloud_instance(self):
+ # pylint: disable=no-member
return self.pycloudlib_instance_cls(tag=self.instance_tag)
@staticmethod
@@ -156,25 +263,36 @@ class _LxdIntegrationCloud(IntegrationCloud):
@staticmethod
def _mount_source(instance: LXDInstance):
- target_path = '/usr/lib/python3/dist-packages/cloudinit'
- format_variables = {
- 'name': instance.name,
- 'source_path': cloudinit.__path__[0],
- 'container_path': target_path,
- }
- log.info(
- 'Mounting source {source_path} directly onto LXD container/vm '
- 'named {name} at {container_path}'.format(**format_variables))
- command = (
- 'lxc config device add {name} host-cloud-init disk '
- 'source={source_path} '
- 'path={container_path}'
- ).format(**format_variables)
- subp(command.split())
+ cloudinit_path = cloudinit.__path__[0]
+ mounts = [
+ (cloudinit_path, '/usr/lib/python3/dist-packages/cloudinit'),
+ (os.path.join(cloudinit_path, '..', 'config', 'cloud.cfg.d'),
+ '/etc/cloud/cloud.cfg.d'),
+ (os.path.join(cloudinit_path, '..', 'templates'),
+ '/etc/cloud/templates'),
+ ]
+ for (n, (source_path, target_path)) in enumerate(mounts):
+ format_variables = {
+ 'name': instance.name,
+ 'source_path': os.path.realpath(source_path),
+ 'container_path': target_path,
+ 'idx': n,
+ }
+ log.info(
+ 'Mounting source %(source_path)s directly onto LXD'
+ ' container/VM named %(name)s at %(container_path)s',
+ format_variables
+ )
+ command = (
+ 'lxc config device add {name} host-cloud-init-{idx} disk '
+ 'source={source_path} '
+ 'path={container_path}'
+ ).format(**format_variables)
+ subp(command.split())
def _perform_launch(self, launch_kwargs):
launch_kwargs['inst_type'] = launch_kwargs.pop('instance_type', None)
- launch_kwargs.pop('wait')
+ wait = launch_kwargs.pop('wait', True)
release = launch_kwargs.pop('image_id')
try:
@@ -190,8 +308,7 @@ class _LxdIntegrationCloud(IntegrationCloud):
)
if self.settings.CLOUD_INIT_SOURCE == 'IN_PLACE':
self._mount_source(pycloudlib_instance)
- pycloudlib_instance.start(wait=False)
- pycloudlib_instance.wait(raise_on_cloudinit_failure=False)
+ pycloudlib_instance.start(wait=wait)
return pycloudlib_instance
@@ -213,3 +330,32 @@ class LxdVmCloud(_LxdIntegrationCloud):
self._profile_list = self.cloud_instance.build_necessary_profiles(
release)
return self._profile_list
+
+
+class OpenstackCloud(IntegrationCloud):
+ datasource = 'openstack'
+ integration_instance_cls = IntegrationInstance
+
+ def _get_cloud_instance(self):
+ if not integration_settings.OPENSTACK_NETWORK:
+ raise Exception(
+ 'OPENSTACK_NETWORK must be set to a valid Openstack network. '
+ 'If using the openstack CLI, try `openstack network list`'
+ )
+ return Openstack(
+ tag='openstack-integration-test',
+ network=integration_settings.OPENSTACK_NETWORK,
+ )
+
+ def _get_initial_image(self):
+ image = ImageSpecification.from_os_image()
+ try:
+ UUID(image.image_id)
+ except ValueError as e:
+ raise Exception(
+ 'When using Openstack, `OS_IMAGE` MUST be specified with '
+ 'a 36-character UUID image ID. Passing in a release name is '
+ 'not valid here.\n'
+ 'OS image id: {}'.format(image.image_id)
+ ) from e
+ return image.image_id
diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
index 73b44bfc..6f4ce8d3 100644
--- a/tests/integration_tests/conftest.py
+++ b/tests/integration_tests/conftest.py
@@ -1,18 +1,30 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import datetime
+import functools
import logging
-import os
import pytest
+import os
import sys
+from tarfile import TarFile
from contextlib import contextmanager
+from pathlib import Path
from tests.integration_tests import integration_settings
from tests.integration_tests.clouds import (
+ AzureCloud,
Ec2Cloud,
GceCloud,
- AzureCloud,
- OciCloud,
+ ImageSpecification,
+ IntegrationCloud,
LxdContainerCloud,
LxdVmCloud,
+ OciCloud,
+ _LxdIntegrationCloud,
+ OpenstackCloud,
+)
+from tests.integration_tests.instances import (
+ CloudInitSource,
+ IntegrationInstance,
)
@@ -27,7 +39,22 @@ platforms = {
'oci': OciCloud,
'lxd_container': LxdContainerCloud,
'lxd_vm': LxdVmCloud,
+ 'openstack': OpenstackCloud,
}
+os_list = ["ubuntu"]
+
+session_start_time = datetime.datetime.now().strftime('%y%m%d%H%M%S')
+
+XENIAL_LXD_VM_EXEC_MSG = """\
+The default xenial images do not support `exec` for LXD VMs.
+
+Specify an image known to work using:
+
+ OS_IMAGE=<image id>::ubuntu::xenial
+
+You can re-run specifically tests that require this by passing `-m
+lxd_use_exec` to pytest.
+"""
def pytest_runtest_setup(item):
@@ -54,6 +81,18 @@ def pytest_runtest_setup(item):
if supported_platforms and current_platform not in supported_platforms:
pytest.skip(unsupported_message)
+ image = ImageSpecification.from_os_image()
+ current_os = image.os
+ supported_os_set = set(os_list).intersection(test_marks)
+ if current_os and supported_os_set and current_os not in supported_os_set:
+ pytest.skip("Cannot run on OS {}".format(current_os))
+ if 'unstable' in test_marks and not integration_settings.RUN_UNSTABLE:
+ pytest.skip('Test marked unstable. Manually remove mark to run it')
+
+ current_release = image.release
+ if "not_{}".format(current_release) in test_marks:
+ pytest.skip("Cannot run on release {}".format(current_release))
+
# disable_subp_usage is defined at a higher level, but we don't
# want it applied here
@@ -75,82 +114,160 @@ def session_cloud():
cloud = platforms[integration_settings.PLATFORM]()
cloud.emit_settings_to_log()
yield cloud
- cloud.destroy()
+ try:
+ cloud.delete_snapshot()
+ finally:
+ cloud.destroy()
+
+def get_validated_source(
+ session_cloud: IntegrationCloud,
+ source=integration_settings.CLOUD_INIT_SOURCE
+) -> CloudInitSource:
+ if source == 'NONE':
+ return CloudInitSource.NONE
+ elif source == 'IN_PLACE':
+ if session_cloud.datasource not in ['lxd_container', 'lxd_vm']:
+ raise ValueError(
+ 'IN_PLACE as CLOUD_INIT_SOURCE only works for LXD')
+ return CloudInitSource.IN_PLACE
+ elif source == 'PROPOSED':
+ return CloudInitSource.PROPOSED
+ elif source.startswith('ppa:'):
+ return CloudInitSource.PPA
+ elif os.path.isfile(str(source)):
+ return CloudInitSource.DEB_PACKAGE
+ elif source == "UPGRADE":
+ return CloudInitSource.UPGRADE
+ raise ValueError(
+ 'Invalid value for CLOUD_INIT_SOURCE setting: {}'.format(source))
-@pytest.fixture(scope='session', autouse=True)
-def setup_image(session_cloud):
+
+@pytest.fixture(scope='session')
+def setup_image(session_cloud: IntegrationCloud):
"""Setup the target environment with the correct version of cloud-init.
So we can launch instances / run tests with the correct image
"""
- client = None
+
+ source = get_validated_source(session_cloud)
+ if not source.installs_new_version():
+ return
log.info('Setting up environment for %s', session_cloud.datasource)
- if integration_settings.CLOUD_INIT_SOURCE == 'NONE':
- pass # that was easy
- elif integration_settings.CLOUD_INIT_SOURCE == 'IN_PLACE':
- if session_cloud.datasource not in ['lxd_container', 'lxd_vm']:
- raise ValueError(
- 'IN_PLACE as CLOUD_INIT_SOURCE only works for LXD')
- # The mount needs to happen after the instance is created, so
- # no further action needed here
- elif integration_settings.CLOUD_INIT_SOURCE == 'PROPOSED':
- client = session_cloud.launch()
- client.install_proposed_image()
- elif integration_settings.CLOUD_INIT_SOURCE.startswith('ppa:'):
- client = session_cloud.launch()
- client.install_ppa(integration_settings.CLOUD_INIT_SOURCE)
- elif os.path.isfile(str(integration_settings.CLOUD_INIT_SOURCE)):
- client = session_cloud.launch()
- client.install_deb()
- else:
- raise ValueError(
- 'Invalid value for CLOUD_INIT_SOURCE setting: {}'.format(
- integration_settings.CLOUD_INIT_SOURCE))
- if client:
- # Even if we're keeping instances, we don't want to keep this
- # one around as it was just for image creation
- client.destroy()
+ client = session_cloud.launch()
+ client.install_new_cloud_init(source)
+ # Even if we're keeping instances, we don't want to keep this
+ # one around as it was just for image creation
+ client.destroy()
log.info('Done with environment setup')
+def _collect_logs(instance: IntegrationInstance, node_id: str,
+ test_failed: bool):
+ """Collect logs from remote instance.
+
+ Args:
+ instance: The current IntegrationInstance to collect logs from
+ node_id: The pytest representation of this test, E.g.:
+ tests/integration_tests/test_example.py::TestExample.test_example
+        test_failed: Whether the test failed
+ """
+ if any([
+ integration_settings.COLLECT_LOGS == 'NEVER',
+ integration_settings.COLLECT_LOGS == 'ON_ERROR' and not test_failed
+ ]):
+ return
+ instance.execute(
+ 'cloud-init collect-logs -u -t /var/tmp/cloud-init.tar.gz')
+ node_id_path = Path(
+ node_id
+ .replace('.py', '') # Having a directory with '.py' would be weird
+ .replace('::', os.path.sep) # Turn classes/tests into paths
+ .replace('[', '-') # For parametrized names
+        .replace(']', '')  # For parametrized names
+ )
+ log_dir = Path(
+ integration_settings.LOCAL_LOG_PATH
+ ) / session_start_time / node_id_path
+ log.info("Writing logs to %s", log_dir)
+ if not log_dir.exists():
+ log_dir.mkdir(parents=True)
+ tarball_path = log_dir / 'cloud-init.tar.gz'
+ instance.pull_file('/var/tmp/cloud-init.tar.gz', tarball_path)
+
+ tarball = TarFile.open(str(tarball_path))
+ tarball.extractall(path=str(log_dir))
+ tarball_path.unlink()
+
+
@contextmanager
-def _client(request, fixture_utils, session_cloud):
+def _client(request, fixture_utils, session_cloud: IntegrationCloud):
"""Fixture implementation for the client fixtures.
Launch the dynamic IntegrationClient instance using any provided
userdata, yield to the test, then clean up.
"""
- user_data = fixture_utils.closest_marker_first_arg_or(
- request, 'user_data', None)
- name = fixture_utils.closest_marker_first_arg_or(
- request, 'instance_name', None
+ getter = functools.partial(
+ fixture_utils.closest_marker_first_arg_or, request, default=None
)
+ user_data = getter('user_data')
+ name = getter('instance_name')
+ lxd_config_dict = getter('lxd_config_dict')
+ lxd_use_exec = fixture_utils.closest_marker_args_or(
+ request, 'lxd_use_exec', None
+ )
+
launch_kwargs = {}
if name is not None:
- launch_kwargs = {"name": name}
+ launch_kwargs["name"] = name
+ if lxd_config_dict is not None:
+ if not isinstance(session_cloud, _LxdIntegrationCloud):
+ pytest.skip("lxd_config_dict requires LXD")
+ launch_kwargs["config_dict"] = lxd_config_dict
+ if lxd_use_exec is not None:
+ if not isinstance(session_cloud, _LxdIntegrationCloud):
+ pytest.skip("lxd_use_exec requires LXD")
+ if isinstance(session_cloud, LxdVmCloud):
+ image_spec = ImageSpecification.from_os_image()
+ if image_spec.release == image_spec.image_id == "xenial":
+ # Why fail instead of skip? We expect that skipped tests will
+ # be run in a different one of our usual battery of test runs
+ # (e.g. LXD-only tests are skipped on EC2 but will run in our
+ # normal LXD test runs). This is not true of this test: it
+ # can't run in our usual xenial LXD VM test run, and it may not
+ # run anywhere else. A failure flags up this discrepancy.
+ pytest.fail(XENIAL_LXD_VM_EXEC_MSG)
+ launch_kwargs["execute_via_ssh"] = False
+
with session_cloud.launch(
user_data=user_data, launch_kwargs=launch_kwargs
) as instance:
+ if lxd_use_exec is not None:
+            # Existing instances are not affected by the launch kwargs, so
+            # set it here too; we still need the launch kwarg so waiting works
+ instance.execute_via_ssh = False
+ previous_failures = request.session.testsfailed
yield instance
+ test_failed = request.session.testsfailed - previous_failures > 0
+ _collect_logs(instance, request.node.nodeid, test_failed)
@pytest.yield_fixture
-def client(request, fixture_utils, session_cloud):
+def client(request, fixture_utils, session_cloud, setup_image):
"""Provide a client that runs for every test."""
with _client(request, fixture_utils, session_cloud) as client:
yield client
@pytest.yield_fixture(scope='module')
-def module_client(request, fixture_utils, session_cloud):
+def module_client(request, fixture_utils, session_cloud, setup_image):
"""Provide a client that runs once per module."""
with _client(request, fixture_utils, session_cloud) as client:
yield client
@pytest.yield_fixture(scope='class')
-def class_client(request, fixture_utils, session_cloud):
+def class_client(request, fixture_utils, session_cloud, setup_image):
"""Provide a client that runs once per class."""
with _client(request, fixture_utils, session_cloud) as client:
yield client
@@ -180,3 +297,20 @@ def pytest_assertrepr_compare(op, left, right):
'"{}" not in cloud-init.log string; unexpectedly found on'
" these lines:".format(left)
] + found_lines
+
+
+def pytest_configure(config):
+ """Perform initial configuration, before the test runs start.
+
+ This hook is only called if integration tests are being executed, so we can
+ use it to configure defaults for integration testing that differ from the
+ rest of the tests in the codebase.
+
+ See
+ https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_configure
+ for pytest's documentation.
+ """
+ if "log_cli_level" in config.option and not config.option.log_cli_level:
+ # If log_cli_level is available in this version of pytest and not set
+ # to anything, set it to INFO.
+ config.option.log_cli_level = "INFO"
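+        # e.g. a plain `pytest tests/integration_tests` run with no
+        # --log-cli-level flag then behaves as if --log-cli-level=INFO
+        # had been passed.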
diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py
index 9b13288c..055ec758 100644
--- a/tests/integration_tests/instances.py
+++ b/tests/integration_tests/instances.py
@@ -1,4 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from enum import Enum
import logging
import os
import uuid
@@ -25,9 +26,28 @@ def _get_tmp_path():
return '/var/tmp/{}.tmp'.format(tmp_filename)
-class IntegrationInstance:
- use_sudo = True
+class CloudInitSource(Enum):
+ """Represents the cloud-init image source setting as a defined value.
+
+ Values here represent all possible values for CLOUD_INIT_SOURCE in
+ tests/integration_tests/integration_settings.py. See that file for an
+ explanation of these values. If the value set there can't be parsed into
+    one of these values, an exception will be raised.
+ """
+ NONE = 1
+ IN_PLACE = 2
+ PROPOSED = 3
+ PPA = 4
+ DEB_PACKAGE = 5
+ UPGRADE = 6
+
+ def installs_new_version(self):
+        return self.name not in [self.NONE.name, self.IN_PLACE.name]
+
+class IntegrationInstance:
def __init__(self, cloud: 'IntegrationCloud', instance: BaseInstance,
settings=integration_settings):
self.cloud = cloud
@@ -37,24 +57,30 @@ class IntegrationInstance:
def destroy(self):
self.instance.delete()
- def execute(self, command, *, use_sudo=None) -> Result:
+ def restart(self):
+ """Restart this instance (via cloud mechanism) and wait for boot.
+
+ This wraps pycloudlib's `BaseInstance.restart`
+ """
+ log.info("Restarting instance and waiting for boot")
+ self.instance.restart()
+
+ def execute(self, command, *, use_sudo=True) -> Result:
if self.instance.username == 'root' and use_sudo is False:
raise Exception('Root user cannot run unprivileged')
- if use_sudo is None:
- use_sudo = self.use_sudo
return self.instance.execute(command, use_sudo=use_sudo)
def pull_file(self, remote_path, local_path):
# First copy to a temporary directory because of permissions issues
tmp_path = _get_tmp_path()
- self.instance.execute('cp {} {}'.format(remote_path, tmp_path))
- self.instance.pull_file(tmp_path, local_path)
+ self.instance.execute('cp {} {}'.format(str(remote_path), tmp_path))
+ self.instance.pull_file(tmp_path, str(local_path))
def push_file(self, local_path, remote_path):
# First push to a temporary directory because of permissions issues
tmp_path = _get_tmp_path()
- self.instance.push_file(local_path, tmp_path)
- self.execute('mv {} {}'.format(tmp_path, remote_path))
+ self.instance.push_file(str(local_path), tmp_path)
+ self.execute('mv {} {}'.format(tmp_path, str(remote_path)))
def read_from_file(self, remote_path) -> str:
result = self.execute('cat {}'.format(remote_path))
@@ -83,36 +109,56 @@ class IntegrationInstance:
os.unlink(tmp_file.name)
def snapshot(self):
- return self.cloud.snapshot(self.instance)
-
- def _install_new_cloud_init(self, remote_script):
- self.execute(remote_script)
+ image_id = self.cloud.snapshot(self.instance)
+ log.info('Created new image: %s', image_id)
+ return image_id
+
+ def install_new_cloud_init(
+ self,
+ source: CloudInitSource,
+ take_snapshot=True,
+ clean=True,
+ ):
+ if source == CloudInitSource.DEB_PACKAGE:
+ self.install_deb()
+ elif source == CloudInitSource.PPA:
+ self.install_ppa()
+ elif source == CloudInitSource.PROPOSED:
+ self.install_proposed_image()
+ elif source == CloudInitSource.UPGRADE:
+ self.upgrade_cloud_init()
+ else:
+ raise Exception(
+                "Specified source {} isn't supported here".format(
+                    source)
+ )
version = self.execute('cloud-init -v').split()[-1]
log.info('Installed cloud-init version: %s', version)
- self.instance.clean()
- image_id = self.snapshot()
- log.info('Created new image: %s', image_id)
- self.cloud.image_id = image_id
+ if clean:
+ self.instance.clean()
+ if take_snapshot:
+ snapshot_id = self.snapshot()
+ self.cloud.snapshot_id = snapshot_id
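+            # This snapshot id is what the session_cloud fixture's
+            # delete_snapshot() call cleans up at session teardown.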
def install_proposed_image(self):
log.info('Installing proposed image')
remote_script = (
- '{sudo} echo deb "http://archive.ubuntu.com/ubuntu '
+ 'echo deb "http://archive.ubuntu.com/ubuntu '
'$(lsb_release -sc)-proposed main" | '
- '{sudo} tee /etc/apt/sources.list.d/proposed.list\n'
- '{sudo} apt-get update -q\n'
- '{sudo} apt-get install -qy cloud-init'
- ).format(sudo='sudo' if self.use_sudo else '')
- self._install_new_cloud_init(remote_script)
+ 'tee /etc/apt/sources.list.d/proposed.list\n'
+ 'apt-get update -q\n'
+ 'apt-get install -qy cloud-init'
+ )
+ self.execute(remote_script)
- def install_ppa(self, repo):
+ def install_ppa(self):
log.info('Installing PPA')
remote_script = (
- '{sudo} add-apt-repository {repo} -y && '
- '{sudo} apt-get update -q && '
- '{sudo} apt-get install -qy cloud-init'
- ).format(sudo='sudo' if self.use_sudo else '', repo=repo)
- self._install_new_cloud_init(remote_script)
+ 'add-apt-repository {repo} -y && '
+ 'apt-get update -q && '
+ 'apt-get install -qy cloud-init'
+ ).format(repo=self.settings.CLOUD_INIT_SOURCE)
+ self.execute(remote_script)
def install_deb(self):
log.info('Installing deb package')
@@ -122,9 +168,13 @@ class IntegrationInstance:
self.push_file(
local_path=integration_settings.CLOUD_INIT_SOURCE,
remote_path=remote_path)
- remote_script = '{sudo} dpkg -i {path}'.format(
- sudo='sudo' if self.use_sudo else '', path=remote_path)
- self._install_new_cloud_init(remote_script)
+ remote_script = 'dpkg -i {path}'.format(path=remote_path)
+ self.execute(remote_script)
+
+ def upgrade_cloud_init(self):
+ log.info('Upgrading cloud-init to latest version in archive')
+ self.execute("apt-get update -q")
+ self.execute("apt-get install -qy cloud-init")
def __enter__(self):
return self
@@ -151,4 +201,4 @@ class IntegrationOciInstance(IntegrationInstance):
class IntegrationLxdInstance(IntegrationInstance):
- use_sudo = False
+ pass
diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py
index a0609f7e..0703be58 100644
--- a/tests/integration_tests/integration_settings.py
+++ b/tests/integration_tests/integration_settings.py
@@ -1,19 +1,27 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
+from distutils.util import strtobool
+
##################################################################
# LAUNCH SETTINGS
##################################################################
# Keep instance (mostly for debugging) when test is finished
KEEP_INSTANCE = False
+# Keep snapshot image (mostly for debugging) when test is finished
+KEEP_IMAGE = False
+# Run tests marked as unstable. Expect failures and dragons.
+RUN_UNSTABLE = False
# One of:
# lxd_container
+# lxd_vm
# azure
# ec2
# gce
# oci
+# openstack
PLATFORM = 'lxd_container'
# The cloud-specific instance type to run. E.g., a1.medium on AWS
@@ -21,8 +29,11 @@ PLATFORM = 'lxd_container'
INSTANCE_TYPE = None
# Determines the base image to use or generate new images from.
-# Can be the name of the OS if running a stock image,
-# otherwise the id of the image being used if using a custom image
+#
+# This can be the name of an Ubuntu release, or in the format
+# <image_id>[::<os>[::<release>]]. If given, os and release should describe
+# the image specified by image_id. (Ubuntu releases are converted to this
+# format internally; in this case, to "focal::ubuntu::focal".)
OS_IMAGE = 'focal'
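+# For example (illustrative):
+#   OS_IMAGE = 'bionic'                       # a stock Ubuntu release
+#   OS_IMAGE = 'my-image-id::ubuntu::focal'   # hypothetical custom image id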
# Populate if you want to use a pre-launched instance instead of
@@ -49,35 +60,49 @@ EXISTING_INSTANCE_ID = None
# code.
# PROPOSED
# Install from the Ubuntu proposed repo
+# UPGRADE
+# Upgrade cloud-init to the version in the Ubuntu archive
# <ppa repo>, e.g., ppa:cloud-init-dev/proposed
# Install from a PPA. It MUST start with 'ppa:'
# <file path>
# A path to a valid package to be uploaded and installed
CLOUD_INIT_SOURCE = 'NONE'
+# Before an instance is torn down, we run `cloud-init collect-logs`
+# and transfer them locally. These settings specify when to collect these
+# logs and where to put them on the local filesystem
+# One of:
+# 'ALWAYS'
+# 'ON_ERROR'
+# 'NEVER'
+COLLECT_LOGS = 'ON_ERROR'
+LOCAL_LOG_PATH = '/tmp/cloud_init_test_logs'
+
##################################################################
-# GCE SPECIFIC SETTINGS
+# SSH KEY SETTINGS
##################################################################
-# Required for GCE
-GCE_PROJECT = None
-# You probably want to override these
-GCE_REGION = 'us-central1'
-GCE_ZONE = 'a'
+# A path to the public SSH key to use for test runs. (Defaults to pycloudlib's
+# default behaviour, using ~/.ssh/id_rsa.pub.)
+PUBLIC_SSH_KEY = None
+
+# For clouds which use named keypairs for SSH connection, the name that is used
+# for the keypair. (Defaults to pycloudlib's default behaviour.)
+KEYPAIR_NAME = None
##################################################################
-# OCI SPECIFIC SETTINGS
+# OPENSTACK SETTINGS
##################################################################
-# Compartment-id found at
-# https://console.us-phoenix-1.oraclecloud.com/a/identity/compartments
-# Required for Oracle
-OCI_COMPARTMENT_ID = None
+# Network to use for Openstack. Should be one of the names/ids found
+# in `openstack network list`
+OPENSTACK_NETWORK = None
##################################################################
# USER SETTINGS OVERRIDES
##################################################################
# Bring in any user-file defined settings
try:
+ # pylint: disable=wildcard-import,unused-wildcard-import
from tests.integration_tests.user_settings import * # noqa
except ImportError:
pass
@@ -91,6 +116,12 @@ except ImportError:
# Perhaps a bit too hacky, but it works :)
current_settings = [var for var in locals() if var.isupper()]
for setting in current_settings:
- globals()[setting] = os.getenv(
+ env_setting = os.getenv(
'CLOUD_INIT_{}'.format(setting), globals()[setting]
)
+ if isinstance(env_setting, str):
+ try:
+ env_setting = bool(strtobool(env_setting.strip()))
+ except ValueError:
+ pass
+ globals()[setting] = env_setting
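+
+# For example (illustrative): exporting CLOUD_INIT_PLATFORM=lxd_vm and
+# CLOUD_INIT_KEEP_INSTANCE=1 would yield PLATFORM = 'lxd_vm' and
+# KEEP_INSTANCE = True here, since strtobool coerces boolean-like strings
+# while leaving other strings untouched.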
diff --git a/tests/integration_tests/log_utils.py b/tests/integration_tests/log_utils.py
new file mode 100644
index 00000000..40baae7b
--- /dev/null
+++ b/tests/integration_tests/log_utils.py
@@ -0,0 +1,11 @@
+def verify_ordered_items_in_text(to_verify: list, text: str):
+ """Assert all items in list appear in order in text.
+
+ Examples:
+ verify_ordered_items_in_text(['a', '1'], 'ab1') # passes
+ verify_ordered_items_in_text(['1', 'a'], 'ab1') # raises AssertionError
+ """
+    index = 0
+    for item in to_verify:
+        # Search forward from the end of the previous match; using an
+        # absolute offset (rather than slicing the text) keeps the ordering
+        # check correct across three or more items.
+        index = text.find(item, index)
+        assert index > -1, "Expected item not found: '{}'".format(item)
+        index += len(item)
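+
+
+# A further illustrative example: verify_ordered_items_in_text(
+#     ['a', 'b', 'X'], 'XXab') raises an AssertionError, because no 'X'
+# occurs after the matched 'b', even though 'X' appears earlier in the text.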
diff --git a/tests/integration_tests/modules/test_apt.py b/tests/integration_tests/modules/test_apt.py
new file mode 100644
index 00000000..54711fc0
--- /dev/null
+++ b/tests/integration_tests/modules/test_apt.py
@@ -0,0 +1,298 @@
+"""Series of integration tests covering apt functionality."""
+import re
+from tests.integration_tests.clouds import ImageSpecification
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+
+USER_DATA = """\
+#cloud-config
+apt:
+ conf: |
+ APT {
+ Get {
+ Assume-Yes "true";
+ Fix-Broken "true";
+ }
+ }
+ proxy: "http://proxy.internal:3128"
+ http_proxy: "http://squid.internal:3128"
+ ftp_proxy: "ftp://squid.internal:3128"
+ https_proxy: "https://squid.internal:3128"
+ primary:
+ - arches: [default]
+ uri: http://badarchive.ubuntu.com/ubuntu
+ security:
+ - arches: [default]
+ uri: http://badsecurity.ubuntu.com/ubuntu
+ sources_list: |
+ deb $MIRROR $RELEASE main restricted
+ deb-src $MIRROR $RELEASE main restricted
+ deb $PRIMARY $RELEASE universe restricted
+ deb-src $PRIMARY $RELEASE universe restricted
+ deb $SECURITY $RELEASE-security multiverse
+ deb-src $SECURITY $RELEASE-security multiverse
+ sources:
+ test_keyserver:
+ keyid: 72600DB15B8E4C8B1964B868038ACC97C660A937
+ keyserver: keyserver.ubuntu.com
+ source: "deb http://ppa.launchpad.net/cloud-init-raharper/curtin-dev/ubuntu $RELEASE main"
+ test_ppa:
+ keyid: 441614D8
+ keyserver: keyserver.ubuntu.com
+ source: "ppa:simplestreams-dev/trunk"
+ test_key:
+ source: "deb http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu $RELEASE main"
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: SKS 1.1.6
+ Comment: Hostname: keyserver.ubuntu.com
+
+ mQINBFbZRUIBEAC+A0PIKYBP9kLC4hQtRrffRS11uLo8/BdtmOdrlW0hpPHzCfKnjR3tvSEI
+ lqPHG1QrrjAXKZDnZMRz+h/px7lUztvytGzHPSJd5ARUzAyjyRezUhoJ3VSCxrPqx62avuWf
+ RfoJaIeHfDehL5/dTVkyiWxfVZ369ZX6JN2AgLsQTeybTQ75+2z0xPrrhnGmgh6g0qTYcAaq
+ M5ONOGiqeSBX/Smjh6ALy5XkhUiFGLsI7Yluf6XSICY/x7gd6RAfgSIQrUTNMoS1sqhT4aot
+ +xvOfQy8ySkfAK4NddXql6E/+ZqTmBY/Lr0YklFBy8jGT+UysfiIznPMIwbmgq5Li7BtDDtX
+ b8Uyi4edPpjtextezfXYn4NVIpPL5dPZS/FXh4HpzyH0pYCfrH4QDGA7i52AGmhpiOFjJMo6
+ N33sdjZHOH/2Vyp+QZaQnsdUAi1N4M6c33tQbpIScn1SY+El8z5JDA4PBzkw8HpLCi1gGoa6
+ V4kfbWqXXbGAJFkLkP/vc4+pY9axOlmCkJg7xCPwhI75y1cONgovhz+BEXOzolh5KZuGbGbj
+ xe0wva5DLBeIg7EQFf+99pOS7Syby3Xpm6ZbswEFV0cllK4jf/QMjtfInxobuMoI0GV0bE5l
+ WlRtPCK5FnbHwxi0wPNzB/5fwzJ77r6HgPrR0OkT0lWmbUyoOQARAQABtC1MYXVuY2hwYWQg
+ UFBBIGZvciBjbG91ZCBpbml0IGRldmVsb3BtZW50IHRlYW2JAjgEEwECACIFAlbZRUICGwMG
+ CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEAg9Bvvk0wTfHfcP/REK5N2s1JYc69qEa9ZN
+ o6oi+A7l6AYw+ZY88O5TJe7F9otv5VXCIKSUT0Vsepjgf0mtXAgf/sb2lsJn/jp7tzgov3YH
+ vSrkTkRydz8xcA87gwQKePuvTLxQpftF4flrBxgSueIn5O/tPrBOxLz7EVYBc78SKg9aj9L2
+ yUp+YuNevlwfZCTYeBb9r3FHaab2HcgkwqYch66+nKYfwiLuQ9NzXXm0Wn0JcEQ6pWvJscbj
+ C9BdawWovfvMK5/YLfI6Btm7F4mIpQBdhSOUp/YXKmdvHpmwxMCN2QhqYK49SM7qE9aUDbJL
+ arppSEBtlCLWhRBZYLTUna+BkuQ1bHz4St++XTR49Qd7vDERALpApDjB2dxPfMiBzCMwQQyq
+ uy13exU8o2ETLg+dZSLfDTzrBNsBFmXlw8WW17nTISYdKeGKL+QdlUjpzdwUMMzHhAO8SmMH
+ zjeSlDSRMXBJFAFSbCl7EwmMKa3yVX0zInT91fNllZ3iatAmtVdqVH/BFQfTIMH2ET7A8WzJ
+ ZzVSuMRhqoKdr5AMcHuJGPUoVkVJHQA+NNvEiXSysF3faL7jmKapmUwrhpYYX2H8pf+VMu2e
+ cLflKTI28dl+ZQ4Pl/aVsxrti/pzhdYy05Sn5ddtySyIkvo8L1cU5MWpbvSlFPkTstBUDLBf
+ pb0uBy+g0oxJQg15
+ =uy53
+ -----END PGP PUBLIC KEY BLOCK-----
+apt_pipelining: os
+""" # noqa: E501
+
+EXPECTED_REGEXES = [
+ r"deb http://badarchive.ubuntu.com/ubuntu [a-z]+ main restricted",
+ r"deb-src http://badarchive.ubuntu.com/ubuntu [a-z]+ main restricted",
+ r"deb http://badarchive.ubuntu.com/ubuntu [a-z]+ universe restricted",
+ r"deb-src http://badarchive.ubuntu.com/ubuntu [a-z]+ universe restricted",
+ r"deb http://badsecurity.ubuntu.com/ubuntu [a-z]+-security multiverse",
+ r"deb-src http://badsecurity.ubuntu.com/ubuntu [a-z]+-security multiverse",
+]
+
+TEST_KEYSERVER_KEY = "7260 0DB1 5B8E 4C8B 1964 B868 038A CC97 C660 A937"
+
+TEST_PPA_KEY = "3552 C902 B4DD F7BD 3842 1821 015D 28D7 4416 14D8"
+
+TEST_KEY = "1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF"
+
+
+@pytest.mark.ci
+@pytest.mark.ubuntu
+@pytest.mark.user_data(USER_DATA)
+class TestApt:
+ def test_sources_list(self, class_client: IntegrationInstance):
+ """Integration test for the apt module's `sources_list` functionality.
+
+ This test specifies a ``sources_list`` and then checks that (a) the
+ expected number of sources.list entries is present, and (b) that each
+ expected line appears in the file.
+
+ (This is ported from
+ `tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml`.)
+ """
+ sources_list = class_client.read_from_file('/etc/apt/sources.list')
+ assert 6 == len(sources_list.rstrip().split('\n'))
+
+ for expected_re in EXPECTED_REGEXES:
+ assert re.search(expected_re, sources_list) is not None
+
+ def test_apt_conf(self, class_client: IntegrationInstance):
+ """Test the apt conf functionality.
+
+ Ported from tests/cloud_tests/testcases/modules/apt_configure_conf.py
+ """
+ apt_config = class_client.read_from_file(
+ '/etc/apt/apt.conf.d/94cloud-init-config'
+ )
+ assert 'Assume-Yes "true";' in apt_config
+ assert 'Fix-Broken "true";' in apt_config
+
+ def test_apt_proxy(self, class_client: IntegrationInstance):
+ """Test the apt proxy functionality.
+
+ Ported from tests/cloud_tests/testcases/modules/apt_configure_proxy.py
+ """
+ out = class_client.read_from_file(
+ '/etc/apt/apt.conf.d/90cloud-init-aptproxy')
+ assert 'Acquire::http::Proxy "http://proxy.internal:3128";' in out
+ assert 'Acquire::http::Proxy "http://squid.internal:3128";' in out
+ assert 'Acquire::ftp::Proxy "ftp://squid.internal:3128";' in out
+ assert 'Acquire::https::Proxy "https://squid.internal:3128";' in out
+
+ def test_ppa_source(self, class_client: IntegrationInstance):
+ """Test the apt ppa functionality.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py
+ """
+ release = ImageSpecification.from_os_image().release
+ ppa_path_contents = class_client.read_from_file(
+ '/etc/apt/sources.list.d/'
+ 'simplestreams-dev-ubuntu-trunk-{}.list'.format(release)
+ )
+
+ assert (
+ 'http://ppa.launchpad.net/simplestreams-dev/trunk/ubuntu'
+ ) in ppa_path_contents
+
+ keys = class_client.execute('apt-key finger')
+ assert TEST_PPA_KEY in keys
+
+ def test_key(self, class_client: IntegrationInstance):
+ """Test the apt key functionality.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_sources_key.py
+ """
+ test_archive_contents = class_client.read_from_file(
+ '/etc/apt/sources.list.d/test_key.list'
+ )
+
+ assert (
+ 'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu'
+ ) in test_archive_contents
+
+ keys = class_client.execute('apt-key finger')
+ assert TEST_KEY in keys
+
+ def test_keyserver(self, class_client: IntegrationInstance):
+ """Test the apt keyserver functionality.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py
+ """
+ test_keyserver_contents = class_client.read_from_file(
+ '/etc/apt/sources.list.d/test_keyserver.list'
+ )
+
+ assert (
+ 'http://ppa.launchpad.net/cloud-init-raharper/curtin-dev/ubuntu'
+ ) in test_keyserver_contents
+
+ keys = class_client.execute('apt-key finger')
+ assert TEST_KEYSERVER_KEY in keys
+
+ def test_os_pipelining(self, class_client: IntegrationInstance):
+        """Test that the 'os' setting does not write an apt config file.
+
+ Ported from tests/cloud_tests/testcases/modules/apt_pipelining_os.py
+ """
+ conf_exists = class_client.execute(
+ 'test -f /etc/apt/apt.conf.d/90cloud-init-pipelining'
+ ).ok
+ assert conf_exists is False
+
+
+_DEFAULT_DATA = """\
+#cloud-config
+apt:
+ primary:
+ - arches:
+ - default
+ {uri}
+ security:
+ - arches:
+ - default
+"""
+DEFAULT_DATA = _DEFAULT_DATA.format(uri='')
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(DEFAULT_DATA)
+class TestDefaults:
+ @pytest.mark.openstack
+ def test_primary_on_openstack(self, class_client: IntegrationInstance):
+        """Test the apt default primary source on openstack when no uri is
+        provided.
+        """
+ zone = class_client.execute('cloud-init query v1.availability_zone')
+ sources_list = class_client.read_from_file('/etc/apt/sources.list')
+ assert '{}.clouds.archive.ubuntu.com'.format(zone) in sources_list
+
+ def test_security(self, class_client: IntegrationInstance):
+ """Test apt default security sources.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_security.py
+ """
+ sources_list = class_client.read_from_file('/etc/apt/sources.list')
+
+ # 3 lines from main, universe, and multiverse
+ assert 3 == sources_list.count('deb http://security.ubuntu.com/ubuntu')
+ assert 3 == sources_list.count(
+ '# deb-src http://security.ubuntu.com/ubuntu'
+ )
+
+
+DEFAULT_DATA_WITH_URI = _DEFAULT_DATA.format(
+ uri='uri: "http://something.random.invalid/ubuntu"'
+)
+
+
+@pytest.mark.user_data(DEFAULT_DATA_WITH_URI)
+def test_default_primary_with_uri(client: IntegrationInstance):
+ """Test apt default primary sources.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_primary.py
+ """
+ sources_list = client.read_from_file('/etc/apt/sources.list')
+ assert 'archive.ubuntu.com' not in sources_list
+
+ assert 'something.random.invalid' in sources_list
+
+
+DISABLED_DATA = """\
+#cloud-config
+apt:
+ disable_suites:
+ - $RELEASE
+ - $RELEASE-updates
+ - $RELEASE-backports
+ - $RELEASE-security
+apt_pipelining: false
+"""
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(DISABLED_DATA)
+class TestDisabled:
+ def test_disable_suites(self, class_client: IntegrationInstance):
+ """Test disabling of apt suites.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py
+ """
+ sources_list = class_client.execute(
+ "cat /etc/apt/sources.list | grep -v '^#'"
+ ).strip()
+ assert '' == sources_list
+
+ def test_disable_apt_pipelining(self, class_client: IntegrationInstance):
+ """Test disabling of apt pipelining.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_pipelining_disable.py
+ """
+ conf = class_client.read_from_file(
+ '/etc/apt/apt.conf.d/90cloud-init-pipelining'
+ )
+ assert 'Acquire::http::Pipeline-Depth "0";' in conf
diff --git a/tests/integration_tests/modules/test_apt_configure_sources_list.py b/tests/integration_tests/modules/test_apt_configure_sources_list.py
deleted file mode 100644
index d2bcc61a..00000000
--- a/tests/integration_tests/modules/test_apt_configure_sources_list.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""Integration test for the apt module's ``sources_list`` functionality.
-
-This test specifies a ``sources_list`` and then checks that (a) the expected
-number of sources.list entries is present, and (b) that each expected line
-appears in the file.
-
-(This is ported from
-``tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml``.)"""
-import re
-
-import pytest
-
-
-USER_DATA = """\
-#cloud-config
-apt:
- primary:
- - arches: [default]
- uri: http://archive.ubuntu.com/ubuntu
- security:
- - arches: [default]
- uri: http://security.ubuntu.com/ubuntu
- sources_list: |
- deb $MIRROR $RELEASE main restricted
- deb-src $MIRROR $RELEASE main restricted
- deb $PRIMARY $RELEASE universe restricted
- deb-src $PRIMARY $RELEASE universe restricted
- deb $SECURITY $RELEASE-security multiverse
- deb-src $SECURITY $RELEASE-security multiverse
-"""
-
-EXPECTED_REGEXES = [
- r"deb http://archive.ubuntu.com/ubuntu [a-z].* main restricted",
- r"deb-src http://archive.ubuntu.com/ubuntu [a-z].* main restricted",
- r"deb http://archive.ubuntu.com/ubuntu [a-z].* universe restricted",
- r"deb-src http://archive.ubuntu.com/ubuntu [a-z].* universe restricted",
- r"deb http://security.ubuntu.com/ubuntu [a-z].*security multiverse",
- r"deb-src http://security.ubuntu.com/ubuntu [a-z].*security multiverse",
-]
-
-
-@pytest.mark.ci
-class TestAptConfigureSourcesList:
-
- @pytest.mark.user_data(USER_DATA)
- def test_sources_list(self, client):
- sources_list = client.read_from_file("/etc/apt/sources.list")
- assert 6 == len(sources_list.rstrip().split('\n'))
-
- for expected_re in EXPECTED_REGEXES:
- assert re.search(expected_re, sources_list) is not None
diff --git a/tests/integration_tests/modules/test_ca_certs.py b/tests/integration_tests/modules/test_ca_certs.py
new file mode 100644
index 00000000..89c01a9c
--- /dev/null
+++ b/tests/integration_tests/modules/test_ca_certs.py
@@ -0,0 +1,91 @@
+"""Integration tests for cc_ca_certs.
+
+(This is ported from ``tests/cloud_tests//testcases/modules/ca_certs.yaml``.)
+
+TODO:
+* Mark this as running on Debian and Alpine (once we have marks for that)
+* Implement testing for the RHEL-specific paths
+"""
+import os.path
+
+import pytest
+
+
+USER_DATA = """\
+#cloud-config
+ca-certs:
+ remove-defaults: true
+ trusted:
+ - |
+ -----BEGIN CERTIFICATE-----
+ MIIGJzCCBA+gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBsjELMAkGA1UEBhMCRlIx
+ DzANBgNVBAgMBkFsc2FjZTETMBEGA1UEBwwKU3RyYXNib3VyZzEYMBYGA1UECgwP
+ d3d3LmZyZWVsYW4ub3JnMRAwDgYDVQQLDAdmcmVlbGFuMS0wKwYDVQQDDCRGcmVl
+ bGFuIFNhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxIjAgBgkqhkiG9w0BCQEW
+ E2NvbnRhY3RAZnJlZWxhbi5vcmcwHhcNMTIwNDI3MTAzMTE4WhcNMjIwNDI1MTAz
+ MTE4WjB+MQswCQYDVQQGEwJGUjEPMA0GA1UECAwGQWxzYWNlMRgwFgYDVQQKDA93
+ d3cuZnJlZWxhbi5vcmcxEDAOBgNVBAsMB2ZyZWVsYW4xDjAMBgNVBAMMBWFsaWNl
+ MSIwIAYJKoZIhvcNAQkBFhNjb250YWN0QGZyZWVsYW4ub3JnMIICIjANBgkqhkiG
+ 9w0BAQEFAAOCAg8AMIICCgKCAgEA3W29+ID6194bH6ejLrIC4hb2Ugo8v6ZC+Mrc
+ k2dNYMNPjcOKABvxxEtBamnSaeU/IY7FC/giN622LEtV/3oDcrua0+yWuVafyxmZ
+ yTKUb4/GUgafRQPf/eiX9urWurtIK7XgNGFNUjYPq4dSJQPPhwCHE/LKAykWnZBX
+ RrX0Dq4XyApNku0IpjIjEXH+8ixE12wH8wt7DEvdO7T3N3CfUbaITl1qBX+Nm2Z6
+ q4Ag/u5rl8NJfXg71ZmXA3XOj7zFvpyapRIZcPmkvZYn7SMCp8dXyXHPdpSiIWL2
+ uB3KiO4JrUYvt2GzLBUThp+lNSZaZ/Q3yOaAAUkOx+1h08285Pi+P8lO+H2Xic4S
+ vMq1xtLg2bNoPC5KnbRfuFPuUD2/3dSiiragJ6uYDLOyWJDivKGt/72OVTEPAL9o
+ 6T2pGZrwbQuiFGrGTMZOvWMSpQtNl+tCCXlT4mWqJDRwuMGrI4DnnGzt3IKqNwS4
+ Qyo9KqjMIPwnXZAmWPm3FOKe4sFwc5fpawKO01JZewDsYTDxVj+cwXwFxbE2yBiF
+ z2FAHwfopwaH35p3C6lkcgP2k/zgAlnBluzACUI+MKJ/G0gv/uAhj1OHJQ3L6kn1
+ SpvQ41/ueBjlunExqQSYD7GtZ1Kg8uOcq2r+WISE3Qc9MpQFFkUVllmgWGwYDuN3
+ Zsez95kCAwEAAaN7MHkwCQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNT
+ TCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFFlfyRO6G8y5qEFKikl5
+ ajb2fT7XMB8GA1UdIwQYMBaAFCNsLT0+KV14uGw+quK7Lh5sh/JTMA0GCSqGSIb3
+ DQEBBQUAA4ICAQAT5wJFPqervbja5+90iKxi1d0QVtVGB+z6aoAMuWK+qgi0vgvr
+ mu9ot2lvTSCSnRhjeiP0SIdqFMORmBtOCFk/kYDp9M/91b+vS+S9eAlxrNCB5VOf
+ PqxEPp/wv1rBcE4GBO/c6HcFon3F+oBYCsUQbZDKSSZxhDm3mj7pb67FNbZbJIzJ
+ 70HDsRe2O04oiTx+h6g6pW3cOQMgIAvFgKN5Ex727K4230B0NIdGkzuj4KSML0NM
+ slSAcXZ41OoSKNjy44BVEZv0ZdxTDrRM4EwJtNyggFzmtTuV02nkUj1bYYYC5f0L
+ ADr6s0XMyaNk8twlWYlYDZ5uKDpVRVBfiGcq0uJIzIvemhuTrofh8pBQQNkPRDFT
+ Rq1iTo1Ihhl3/Fl1kXk1WR3jTjNb4jHX7lIoXwpwp767HAPKGhjQ9cFbnHMEtkro
+ RlJYdtRq5mccDtwT0GFyoJLLBZdHHMHJz0F9H7FNk2tTQQMhK5MVYwg+LIaee586
+ CQVqfbscp7evlgjLW98H+5zylRHAgoH2G79aHljNKMp9BOuq6SnEglEsiWGVtu2l
+ hnx8SB3sVJZHeer8f/UQQwqbAO+Kdy70NmbSaqaVtp8jOxLiidWkwSyRTsuU6D8i
+ DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ==
+ -----END CERTIFICATE-----
+"""
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(USER_DATA)
+class TestCaCerts:
+ def test_certs_updated(self, class_client):
+ """Test that /etc/ssl/certs is updated as we expect."""
+ root = "/etc/ssl/certs"
+ filenames = class_client.execute(["ls", "-1", root]).splitlines()
+ unlinked_files = []
+ links = {}
+ for filename in filenames:
+ full_path = os.path.join(root, filename)
+ symlink_target = class_client.execute(["readlink", full_path])
+ is_symlink = symlink_target.ok
+ if is_symlink:
+ links[filename] = symlink_target
+ else:
+ unlinked_files.append(filename)
+
+ assert ["ca-certificates.crt"] == unlinked_files
+ assert "cloud-init-ca-certs.pem" == links["a535c1f3.0"]
+ assert (
+ "/usr/share/ca-certificates/cloud-init-ca-certs.crt"
+ == links["cloud-init-ca-certs.pem"]
+ )
+
+ def test_cert_installed(self, class_client):
+ """Test that our specified cert has been installed"""
+ checksum = class_client.execute(
+ "sha256sum /etc/ssl/certs/ca-certificates.crt"
+ )
+ assert (
+ "78e875f18c73c1aab9167ae0bd323391e52222cc2dbcda42d129537219300062"
+ in checksum
+ )
diff --git a/tests/integration_tests/modules/test_cli.py b/tests/integration_tests/modules/test_cli.py
new file mode 100644
index 00000000..3f41b34d
--- /dev/null
+++ b/tests/integration_tests/modules/test_cli.py
@@ -0,0 +1,45 @@
+"""Integration tests for CLI functionality
+
+These would be for behavior manually invoked by user from the command line
+"""
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+
+VALID_USER_DATA = """\
+#cloud-config
+runcmd:
+ - echo 'hi' > /var/tmp/test
+"""
+
+INVALID_USER_DATA = """\
+runcmd:
+ - echo 'hi' > /var/tmp/test
+"""
+
+
+@pytest.mark.sru_2020_11
+@pytest.mark.user_data(VALID_USER_DATA)
+def test_valid_userdata(client: IntegrationInstance):
+ """Test `cloud-init devel schema` with valid userdata.
+
+ PR #575
+ """
+ result = client.execute('cloud-init devel schema --system')
+ assert result.ok
+ assert 'Valid cloud-config: system userdata' == result.stdout.strip()
+
+
+@pytest.mark.sru_2020_11
+@pytest.mark.user_data(INVALID_USER_DATA)
+def test_invalid_userdata(client: IntegrationInstance):
+ """Test `cloud-init devel schema` with invalid userdata.
+
+ PR #575
+ """
+ result = client.execute('cloud-init devel schema --system')
+ assert not result.ok
+ assert 'Cloud config schema errors' in result.stderr
+ assert 'needs to begin with "#cloud-config"' in result.stderr
diff --git a/tests/integration_tests/modules/test_keys_to_console.py b/tests/integration_tests/modules/test_keys_to_console.py
new file mode 100644
index 00000000..298c9e6d
--- /dev/null
+++ b/tests/integration_tests/modules/test_keys_to_console.py
@@ -0,0 +1,48 @@
+"""Integration tests for the cc_keys_to_console module.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/keys_to_console.yaml``.)"""
+import pytest
+
+BLACKLIST_USER_DATA = """\
+#cloud-config
+ssh_fp_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
+ssh_key_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
+"""
+
+DISABLED_USER_DATA = """\
+#cloud-config
+ssh:
+ emit_keys_to_console: false
+"""
+
+
+@pytest.mark.user_data(BLACKLIST_USER_DATA)
+class TestKeysToConsoleBlacklist:
+ """Test that the blacklist options work as expected."""
+ @pytest.mark.parametrize("key_type", ["DSA", "ECDSA"])
+ def test_excluded_keys(self, class_client, key_type):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "({})".format(key_type) not in syslog
+
+ @pytest.mark.parametrize("key_type", ["ED25519", "RSA"])
+ def test_included_keys(self, class_client, key_type):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "({})".format(key_type) in syslog
+
+
+@pytest.mark.user_data(DISABLED_USER_DATA)
+class TestKeysToConsoleDisabled:
+ """Test that output can be fully disabled."""
+ @pytest.mark.parametrize("key_type", ["DSA", "ECDSA", "ED25519", "RSA"])
+ def test_keys_excluded(self, class_client, key_type):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "({})".format(key_type) not in syslog
+
+ def test_header_excluded(self, class_client):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "BEGIN SSH HOST KEY FINGERPRINTS" not in syslog
+
+ def test_footer_excluded(self, class_client):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "END SSH HOST KEY FINGERPRINTS" not in syslog
diff --git a/tests/integration_tests/modules/test_lxd_bridge.py b/tests/integration_tests/modules/test_lxd_bridge.py
new file mode 100644
index 00000000..cbf11179
--- /dev/null
+++ b/tests/integration_tests/modules/test_lxd_bridge.py
@@ -0,0 +1,48 @@
+"""Integration tests for LXD bridge creation.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/lxd_bridge.yaml``.)
+"""
+import pytest
+import yaml
+
+
+USER_DATA = """\
+#cloud-config
+lxd:
+ init:
+ storage_backend: dir
+ bridge:
+ mode: new
+ name: lxdbr0
+ ipv4_address: 10.100.100.1
+ ipv4_netmask: 24
+ ipv4_dhcp_first: 10.100.100.100
+ ipv4_dhcp_last: 10.100.100.200
+ ipv4_nat: true
+ domain: lxd
+"""
+
+
+@pytest.mark.no_container
+@pytest.mark.user_data(USER_DATA)
+class TestLxdBridge:
+
+ @pytest.mark.parametrize("binary_name", ["lxc", "lxd"])
+ def test_binaries_installed(self, class_client, binary_name):
+ """Check that the expected LXD binaries are installed"""
+ assert class_client.execute(["which", binary_name]).ok
+
+ @pytest.mark.not_xenial
+ @pytest.mark.sru_2020_11
+ def test_bridge(self, class_client):
+ """Check that the given bridge is configured"""
+ cloud_init_log = class_client.read_from_file("/var/log/cloud-init.log")
+ assert "WARN" not in cloud_init_log
+
+ # The bridge should exist
+ assert class_client.execute("ip addr show lxdbr0")
+
+ raw_network_config = class_client.execute("lxc network show lxdbr0")
+ network_config = yaml.safe_load(raw_network_config)
+ assert "10.100.100.1/24" == network_config["config"]["ipv4.address"]
diff --git a/tests/integration_tests/modules/test_package_update_upgrade_install.py b/tests/integration_tests/modules/test_package_update_upgrade_install.py
index 8a38ad84..28d741bc 100644
--- a/tests/integration_tests/modules/test_package_update_upgrade_install.py
+++ b/tests/integration_tests/modules/test_package_update_upgrade_install.py
@@ -26,6 +26,7 @@ package_upgrade: true
"""
+@pytest.mark.ubuntu
@pytest.mark.user_data(USER_DATA)
class TestPackageUpdateUpgradeInstall:
diff --git a/tests/integration_tests/modules/test_power_state_change.py b/tests/integration_tests/modules/test_power_state_change.py
new file mode 100644
index 00000000..eebe6608
--- /dev/null
+++ b/tests/integration_tests/modules/test_power_state_change.py
@@ -0,0 +1,90 @@
+"""Integration test of the cc_power_state_change module.
+
+Test that the power state config options work as expected.
+"""
+
+import time
+
+import pytest
+
+from tests.integration_tests.clouds import IntegrationCloud
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.log_utils import verify_ordered_items_in_text
+
+USER_DATA = """\
+#cloud-config
+power_state:
+ delay: {delay}
+ mode: {mode}
+ message: msg
+ timeout: {timeout}
+ condition: {condition}
+"""
+
+
+def _detect_reboot(instance: IntegrationInstance):
+    # We'll wait for the instance to come up here, but we don't know whether
+    # we're detecting the first boot or the second, so we also check the logs
+    # to ensure we've booted twice. If the logs show we've only booted once,
+    # wait until we've booted twice.
+ instance.instance.wait()
+ for _ in range(600):
+ try:
+ log = instance.read_from_file('/var/log/cloud-init.log')
+ boot_count = log.count("running 'init-local'")
+ if boot_count == 1:
+ instance.instance.wait()
+ elif boot_count > 1:
+ break
+ except Exception:
+ pass
+ time.sleep(1)
+ else:
+ raise Exception('Could not detect reboot')
+
+
+def _can_connect(instance):
+ return instance.execute('true').ok
+
+
+# This test is marked unstable because even though it should be able to
+# run anywhere, I can only get it to run in an lxd container, and even then
+# occasionally some timing issues will crop up.
+@pytest.mark.unstable
+@pytest.mark.sru_2020_11
+@pytest.mark.ubuntu
+@pytest.mark.lxd_container
+class TestPowerChange:
+ @pytest.mark.parametrize('mode,delay,timeout,expected', [
+ ('poweroff', 'now', '10', 'will execute: shutdown -P now msg'),
+ ('reboot', 'now', '0', 'will execute: shutdown -r now msg'),
+ ('halt', '+1', '0', 'will execute: shutdown -H +1 msg'),
+ ])
+ def test_poweroff(self, session_cloud: IntegrationCloud,
+ mode, delay, timeout, expected):
+ with session_cloud.launch(
+ user_data=USER_DATA.format(
+ delay=delay, mode=mode, timeout=timeout, condition='true'),
+ launch_kwargs={'wait': False},
+ ) as instance:
+ if mode == 'reboot':
+ _detect_reboot(instance)
+ else:
+ instance.instance.wait_for_stop()
+ instance.instance.start(wait=True)
+ log = instance.read_from_file('/var/log/cloud-init.log')
+ assert _can_connect(instance)
+ lines_to_check = [
+ 'Running module power-state-change',
+ expected,
+ "running 'init-local'",
+ 'config-power-state-change already ran',
+ ]
+ verify_ordered_items_in_text(lines_to_check, log)
+
+ @pytest.mark.user_data(USER_DATA.format(delay='0', mode='poweroff',
+ timeout='0', condition='false'))
+ def test_poweroff_false_condition(self, client: IntegrationInstance):
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert _can_connect(client)
+ assert 'Condition was false. Will not perform state change' in log
diff --git a/tests/integration_tests/modules/test_seed_random_data.py b/tests/integration_tests/modules/test_seed_random_data.py
index b365fa98..94e982e0 100644
--- a/tests/integration_tests/modules/test_seed_random_data.py
+++ b/tests/integration_tests/modules/test_seed_random_data.py
@@ -24,5 +24,7 @@ class TestSeedRandomData:
@pytest.mark.user_data(USER_DATA)
def test_seed_random_data(self, client):
- seed_output = client.read_from_file("/root/seed")
- assert seed_output.strip() == "MYUb34023nD:LFDK10913jk;dfnk:Df"
+ # Only read the first 31 characters, because the rest could be
+ # binary data
+ result = client.execute("head -c 31 < /root/seed")
+ assert result.startswith("MYUb34023nD:LFDK10913jk;dfnk:Df")
diff --git a/tests/integration_tests/modules/test_set_password.py b/tests/integration_tests/modules/test_set_password.py
index b13f76fb..d7cf91a5 100644
--- a/tests/integration_tests/modules/test_set_password.py
+++ b/tests/integration_tests/modules/test_set_password.py
@@ -116,6 +116,30 @@ class Mixin:
# Which are not the same
assert shadow_users["harry"] != shadow_users["dick"]
+ def test_random_passwords_not_stored_in_cloud_init_output_log(
+ self, class_client
+ ):
+ """We should not emit passwords to the in-instance log file.
+
+ LP: #1918303
+ """
+ cloud_init_output = class_client.read_from_file(
+ "/var/log/cloud-init-output.log"
+ )
+ assert "dick:" not in cloud_init_output
+ assert "harry:" not in cloud_init_output
+
+ def test_random_passwords_emitted_to_serial_console(self, class_client):
+ """We should emit passwords to the serial console. (LP: #1918303)"""
+ try:
+ console_log = class_client.instance.console_log()
+ except NotImplementedError:
+ # Assume that an exception here means that we can't use the console
+ # log
+ pytest.skip("NotImplementedError when requesting console log")
+ assert "dick:" in console_log
+ assert "harry:" in console_log
+
def test_explicit_password_set_correctly(self, class_client):
"""Test that an explicitly-specified password is set correctly."""
shadow_users, _ = self._fetch_and_parse_etc_shadow(class_client)
diff --git a/tests/integration_tests/modules/test_snap.py b/tests/integration_tests/modules/test_snap.py
index b626f6b0..481edbaa 100644
--- a/tests/integration_tests/modules/test_snap.py
+++ b/tests/integration_tests/modules/test_snap.py
@@ -20,6 +20,7 @@ snap:
@pytest.mark.ci
+@pytest.mark.ubuntu
class TestSnap:
@pytest.mark.user_data(USER_DATA)
diff --git a/tests/integration_tests/modules/test_ssh_import_id.py b/tests/integration_tests/modules/test_ssh_import_id.py
index 45d37d6c..3db573b5 100644
--- a/tests/integration_tests/modules/test_ssh_import_id.py
+++ b/tests/integration_tests/modules/test_ssh_import_id.py
@@ -3,6 +3,10 @@
This test specifies ssh keys to be imported by the ``ssh_import_id`` module
and then checks whether the ssh keys were successfully imported.
+TODO:
+* This test assumes that SSH keys will be imported into the /home/ubuntu; this
+ will need modification to run on other OSes.
+
(This is ported from
``tests/cloud_tests/testcases/modules/ssh_import_id.yaml``.)"""
@@ -18,6 +22,7 @@ ssh_import_id:
@pytest.mark.ci
+@pytest.mark.ubuntu
class TestSshImportId:
@pytest.mark.user_data(USER_DATA)
diff --git a/tests/integration_tests/modules/test_ssh_keys_provided.py b/tests/integration_tests/modules/test_ssh_keys_provided.py
index 27d193c1..6aae96ae 100644
--- a/tests/integration_tests/modules/test_ssh_keys_provided.py
+++ b/tests/integration_tests/modules/test_ssh_keys_provided.py
@@ -83,66 +83,78 @@ ssh_keys:
@pytest.mark.user_data(USER_DATA)
class TestSshKeysProvided:
- def test_ssh_dsa_keys_provided(self, class_client):
- """Test dsa public key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_dsa_key.pub")
- assert (
- "AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4R"
- "ZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM") in out
-
- """Test dsa private key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_dsa_key")
- assert (
- "MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr"
- "hOVAfzZ6+jklP") in out
-
- def test_ssh_rsa_keys_provided(self, class_client):
- """Test rsa public key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key.pub")
- assert (
- "AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT"
- "LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4") in out
-
- """Test rsa private key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key")
- assert (
- "4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un"
- "RQvLZpMRdywBm") in out
-
- def test_ssh_rsa_certificate_provided(self, class_client):
- """Test rsa certificate was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key-cert.pub")
- assert (
- "AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgMpg"
- "BP4Phn3L8I7Vqh7lmHKcOfIokEvSEbHDw83Y3JloAAAAD") in out
-
- def test_ssh_certificate_updated_sshd_config(self, class_client):
- """Test ssh certificate was added to /etc/ssh/sshd_config."""
- out = class_client.read_from_file("/etc/ssh/sshd_config").strip()
- assert "HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub" in out
-
- def test_ssh_ecdsa_keys_provided(self, class_client):
- """Test ecdsa public key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_ecdsa_key.pub")
- assert (
- "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB"
- "BBFsS5Tvky/IC/dXhE/afxxU") in out
-
- """Test ecdsa private key generated."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_ecdsa_key")
- assert (
- "AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY"
- "5mpZqxgX4vcgb") in out
-
- def test_ssh_ed25519_keys_provided(self, class_client):
- """Test ed25519 public key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_ed25519_key.pub")
- assert (
- "AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6"
- "G15dqjQ2XkNVOEnb5") in out
-
- """Test ed25519 private key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_ed25519_key")
- assert (
- "XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT"
- "OhteXao0Nl5DVThJ2+Q") in out
+ @pytest.mark.parametrize(
+ "config_path,expected_out",
+ (
+ (
+ "/etc/ssh/ssh_host_dsa_key.pub",
+ (
+ "AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4R"
+ "ZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM"
+ ),
+ ),
+ (
+ "/etc/ssh/ssh_host_dsa_key",
+ (
+ "MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr"
+ "hOVAfzZ6+jklP"
+ ),
+ ),
+ (
+ "/etc/ssh/ssh_host_rsa_key.pub",
+ (
+ "AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT"
+ "LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4"
+ ),
+ ),
+ (
+ "/etc/ssh/ssh_host_rsa_key",
+ (
+ "4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un"
+ "RQvLZpMRdywBm"
+ ),
+ ),
+ (
+ "/etc/ssh/ssh_host_rsa_key-cert.pub",
+ (
+ "AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgMpg"
+ "BP4Phn3L8I7Vqh7lmHKcOfIokEvSEbHDw83Y3JloAAAAD"
+ ),
+ ),
+ (
+ "/etc/ssh/sshd_config",
+ "HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub",
+ ),
+ (
+ "/etc/ssh/ssh_host_ecdsa_key.pub",
+ (
+ "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB"
+ "BBFsS5Tvky/IC/dXhE/afxxU"
+ ),
+ ),
+ (
+ "/etc/ssh/ssh_host_ecdsa_key",
+ (
+ "AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY"
+ "5mpZqxgX4vcgb"
+ ),
+ ),
+ (
+ "/etc/ssh/ssh_host_ed25519_key.pub",
+ (
+ "AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6"
+ "G15dqjQ2XkNVOEnb5"
+ ),
+ ),
+ (
+ "/etc/ssh/ssh_host_ed25519_key",
+ (
+ "XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT"
+ "OhteXao0Nl5DVThJ2+Q"
+ ),
+ ),
+ )
+ )
+ def test_ssh_provided_keys(self, config_path, expected_out, class_client):
+ out = class_client.read_from_file(config_path).strip()
+ assert expected_out in out
diff --git a/tests/integration_tests/modules/test_users_groups.py b/tests/integration_tests/modules/test_users_groups.py
index 6a51f5a6..bcb17b7f 100644
--- a/tests/integration_tests/modules/test_users_groups.py
+++ b/tests/integration_tests/modules/test_users_groups.py
@@ -1,12 +1,16 @@
-"""Integration test for the user_groups module.
+"""Integration tests for the user_groups module.
-This test specifies a number of users and groups via user-data, and confirms
-that they have been configured correctly in the system under test.
+TODO:
+* This module assumes that the "ubuntu" user will be created when "default" is
+ specified; this will need modification to run on other OSes.
"""
import re
import pytest
+from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.instances import IntegrationInstance
+
USER_DATA = """\
#cloud-config
@@ -41,6 +45,13 @@ AHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
@pytest.mark.ci
@pytest.mark.user_data(USER_DATA)
class TestUsersGroups:
+ """Test users and groups.
+
+ This test specifies a number of users and groups via user-data, and
+ confirms that they have been configured correctly in the system under test.
+ """
+
+ @pytest.mark.ubuntu
@pytest.mark.parametrize(
"getent_args,regex",
[
@@ -81,3 +92,32 @@ class TestUsersGroups:
_, groups_str = output.split(":", maxsplit=1)
groups = groups_str.split()
assert "secret" in groups
+
+
+@pytest.mark.user_data(USER_DATA)
+def test_sudoers_includedir(client: IntegrationInstance):
+    """Ensure we don't add an additional #includedir to sudoers.
+
+ Newer versions of /etc/sudoers will use @includedir rather than
+ #includedir. Ensure we handle that properly and don't include an
+ additional #includedir when one isn't warranted.
+
+ https://github.com/canonical/cloud-init/pull/783
+ """
+ if ImageSpecification.from_os_image().release in [
+ 'xenial', 'bionic', 'focal'
+ ]:
+        pytest.skip(
+            'Test requires version of sudo installed on groovy and later'
+        )
+ client.execute("sed -i 's/#include/@include/g' /etc/sudoers")
+
+ sudoers = client.read_from_file('/etc/sudoers')
+ if '@includedir /etc/sudoers.d' not in sudoers:
+ client.execute("echo '@includedir /etc/sudoers.d' >> /etc/sudoers")
+ client.instance.clean()
+ client.restart()
+ sudoers = client.read_from_file('/etc/sudoers')
+
+ assert '#includedir' not in sudoers
+ assert sudoers.count('includedir /etc/sudoers.d') == 1
diff --git a/tests/integration_tests/test_logging.py b/tests/integration_tests/test_logging.py
new file mode 100644
index 00000000..b31a0434
--- /dev/null
+++ b/tests/integration_tests/test_logging.py
@@ -0,0 +1,22 @@
+"""Integration tests relating to cloud-init's logging."""
+
+
+class TestVarLogCloudInitOutput:
+ """Integration tests relating to /var/log/cloud-init-output.log."""
+
+ def test_var_log_cloud_init_output_not_world_readable(self, client):
+ """
+        The log can contain sensitive data; it shouldn't be world-readable.
+
+ LP: #1918303
+ """
+ # Check the file exists
+ assert client.execute("test -f /var/log/cloud-init-output.log").ok
+
+ # Check its permissions are as we expect
+ perms, user, group = client.execute(
+ "stat -c %a:%U:%G /var/log/cloud-init-output.log"
+ ).split(":")
+ assert "640" == perms
+ assert "root" == user
+ assert "adm" == group
diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py
new file mode 100644
index 00000000..c20cb3c1
--- /dev/null
+++ b/tests/integration_tests/test_upgrade.py
@@ -0,0 +1,98 @@
+import logging
+import pytest
+import time
+from pathlib import Path
+
+from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud
+from tests.integration_tests.conftest import (
+ get_validated_source,
+ session_start_time,
+)
+
+log = logging.getLogger('integration_testing')
+
+USER_DATA = """\
+#cloud-config
+hostname: SRU-worked
+"""
+
+
+def _output_to_compare(instance, file_path, netcfg_path):
+ commands = [
+ 'hostname',
+ 'dpkg-query --show cloud-init',
+ 'cat /run/cloud-init/result.json',
+ # 'cloud-init init' helps us understand if our pickling upgrade paths
+ # have broken across re-constitution of a cached datasource. Some
+ # platforms invalidate their datasource cache on reboot, so we run
+ # it here to ensure we get a dirty run.
+ 'cloud-init init',
+ 'grep Trace /var/log/cloud-init.log',
+ 'cloud-id',
+ 'cat {}'.format(netcfg_path),
+ 'systemd-analyze',
+ 'systemd-analyze blame',
+ 'cloud-init analyze show',
+ 'cloud-init analyze blame',
+ ]
+ with file_path.open('w') as f:
+ for command in commands:
+ f.write('===== {} ====='.format(command) + '\n')
+ f.write(instance.execute(command) + '\n')
+
+
+def _restart(instance):
+ # work around pad.lv/1908287
+ instance.restart()
+ if not instance.execute('cloud-init status --wait --long').ok:
+ for _ in range(10):
+ time.sleep(5)
+ result = instance.execute('cloud-init status --wait --long')
+ if result.ok:
+ return
+ raise Exception("Cloud-init didn't finish starting up")
+
+
+@pytest.mark.sru_2020_11
+def test_upgrade(session_cloud: IntegrationCloud):
+ source = get_validated_source(session_cloud)
+ if not source.installs_new_version():
+ pytest.skip("Install method '{}' not supported for this test".format(
+ source
+ ))
+ return # type checking doesn't understand that skip raises
+
+ launch_kwargs = {
+ 'image_id': session_cloud._get_initial_image(),
+ }
+
+ image = ImageSpecification.from_os_image()
+
+ # Get the paths to write test logs
+ output_dir = Path(session_cloud.settings.LOCAL_LOG_PATH)
+ output_dir.mkdir(parents=True, exist_ok=True)
+ base_filename = 'test_upgrade_{platform}_{os}_{{stage}}_{time}.log'.format(
+ platform=session_cloud.settings.PLATFORM,
+ os=image.release,
+ time=session_start_time,
+ )
+ before_path = output_dir / base_filename.format(stage='before')
+ after_path = output_dir / base_filename.format(stage='after')
+
+ # Get the network cfg file
+ netcfg_path = '/dev/null'
+ if image.os == 'ubuntu':
+ netcfg_path = '/etc/netplan/50-cloud-init.yaml'
+ if image.release == 'xenial':
+ netcfg_path = '/etc/network/interfaces.d/50-cloud-init.cfg'
+
+ with session_cloud.launch(
+ launch_kwargs=launch_kwargs, user_data=USER_DATA,
+ ) as instance:
+ _output_to_compare(instance, before_path, netcfg_path)
+ instance.install_new_cloud_init(source, take_snapshot=False)
+ instance.execute('hostname something-else')
+ _restart(instance)
+ _output_to_compare(instance, after_path, netcfg_path)
+
+ log.info('Wrote upgrade test logs to %s and %s', before_path, after_path)
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index fb2b55e8..8c968ae9 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -33,11 +33,12 @@ INSTANCE_ID = "i-testing"
class FakeDataSource(sources.DataSource):
- def __init__(self, userdata=None, vendordata=None):
+ def __init__(self, userdata=None, vendordata=None, vendordata2=None):
sources.DataSource.__init__(self, {}, None, None)
self.metadata = {'instance-id': INSTANCE_ID}
self.userdata_raw = userdata
self.vendordata_raw = vendordata
+ self.vendordata2_raw = vendordata2
def count_messages(root):
@@ -105,13 +106,14 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
self.assertEqual('qux', cc['baz'])
self.assertEqual('qux2', cc['bar'])
- def test_simple_jsonp_vendor_and_user(self):
+ def test_simple_jsonp_vendor_and_vendor2_and_user(self):
# test that user-data wins over vendor
user_blob = '''
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "qux" },
- { "op": "add", "path": "/bar", "value": "qux2" }
+ { "op": "add", "path": "/bar", "value": "qux2" },
+ { "op": "add", "path": "/foobar", "value": "qux3" }
]
'''
vendor_blob = '''
@@ -119,12 +121,23 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
[
{ "op": "add", "path": "/baz", "value": "quxA" },
{ "op": "add", "path": "/bar", "value": "quxB" },
- { "op": "add", "path": "/foo", "value": "quxC" }
+ { "op": "add", "path": "/foo", "value": "quxC" },
+ { "op": "add", "path": "/corge", "value": "quxEE" }
+]
+'''
+ vendor2_blob = '''
+#cloud-config-jsonp
+[
+ { "op": "add", "path": "/corge", "value": "quxD" },
+ { "op": "add", "path": "/grault", "value": "quxFF" },
+ { "op": "add", "path": "/foobar", "value": "quxGG" }
]
'''
self.reRoot()
initer = stages.Init()
- initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
+ initer.datasource = FakeDataSource(user_blob,
+ vendordata=vendor_blob,
+ vendordata2=vendor2_blob)
initer.read_cfg()
initer.initialize()
initer.fetch()
@@ -138,9 +151,15 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
(_which_ran, _failures) = mods.run_section('cloud_init_modules')
cfg = mods.cfg
self.assertIn('vendor_data', cfg)
+ self.assertIn('vendor_data2', cfg)
+ # Confirm that vendordata2 overrides vendordata, and that
+ # userdata overrides both
self.assertEqual('qux', cfg['baz'])
self.assertEqual('qux2', cfg['bar'])
+ self.assertEqual('qux3', cfg['foobar'])
self.assertEqual('quxC', cfg['foo'])
+ self.assertEqual('quxD', cfg['corge'])
+ self.assertEqual('quxFF', cfg['grault'])
def test_simple_jsonp_no_vendor_consumed(self):
# make sure that vendor data is not consumed
@@ -294,6 +313,10 @@ run:
#!/bin/bash
echo "test"
'''
+ vendor2_blob = '''
+#!/bin/bash
+echo "dynamic test"
+'''
user_blob = '''
#cloud-config
@@ -303,7 +326,9 @@ vendor_data:
'''
new_root = self.reRoot()
initer = stages.Init()
- initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
+ initer.datasource = FakeDataSource(user_blob,
+ vendordata=vendor_blob,
+ vendordata2=vendor2_blob)
initer.read_cfg()
initer.initialize()
initer.fetch()
diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py
index eb2828d5..cab1ac2b 100644
--- a/tests/unittests/test_datasource/test_aliyun.py
+++ b/tests/unittests/test_datasource/test_aliyun.py
@@ -7,6 +7,7 @@ from unittest import mock
from cloudinit import helpers
from cloudinit.sources import DataSourceAliYun as ay
+from cloudinit.sources.DataSourceEc2 import convert_ec2_metadata_network_config
from cloudinit.tests import helpers as test_helpers
DEFAULT_METADATA = {
@@ -183,6 +184,35 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
self.assertEqual(ay.parse_public_keys(public_keys),
public_keys['key-pair-0']['openssh-key'])
+ def test_route_metric_calculated_without_device_number(self):
+ """Test that route-metric code works without `device-number`
+
+ `device-number` is part of EC2 metadata, but not supported on aliyun.
+ Attempting to access it will raise a KeyError.
+
+ LP: #1917875
+ """
+ netcfg = convert_ec2_metadata_network_config(
+ {"interfaces": {"macs": {
+ "06:17:04:d7:26:09": {
+ "interface-id": "eni-e44ef49e",
+ },
+ "06:17:04:d7:26:08": {
+ "interface-id": "eni-e44ef49f",
+ }
+ }}},
+ macs_to_nics={
+ '06:17:04:d7:26:09': 'eth0',
+ '06:17:04:d7:26:08': 'eth1',
+ }
+ )
+
+ met0 = netcfg['ethernets']['eth0']['dhcp4-overrides']['route-metric']
+ met1 = netcfg['ethernets']['eth1']['dhcp4-overrides']['route-metric']
+
+ # route-metric numbers should be 100 apart
+ assert 100 == abs(met0 - met1)
+
class TestIsAliYun(test_helpers.CiTestCase):
ALIYUN_PRODUCT = 'Alibaba Cloud ECS'
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index e363c1f9..dedebeb1 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -159,6 +159,22 @@ SECONDARY_INTERFACE = {
}
}
+SECONDARY_INTERFACE_NO_IP = {
+ "macAddress": "220D3A047598",
+ "ipv6": {
+ "ipAddress": []
+ },
+ "ipv4": {
+ "subnet": [
+ {
+ "prefix": "24",
+ "address": "10.0.1.0"
+ }
+ ],
+ "ipAddress": []
+ }
+}
+
IMDS_NETWORK_METADATA = {
"interface": [
{
@@ -185,6 +201,7 @@ IMDS_NETWORK_METADATA = {
}
MOCKPATH = 'cloudinit.sources.DataSourceAzure.'
+EXAMPLE_UUID = 'd0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8'
class TestParseNetworkConfig(CiTestCase):
@@ -391,7 +408,9 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
def setUp(self):
super(TestGetMetadataFromIMDS, self).setUp()
- self.network_md_url = dsaz.IMDS_URL + "instance?api-version=2019-06-01"
+ self.network_md_url = "{}/instance?api-version=2019-06-01".format(
+ dsaz.IMDS_URL
+ )
@mock.patch(MOCKPATH + 'readurl')
@mock.patch(MOCKPATH + 'EphemeralDHCPv4', autospec=True)
@@ -501,7 +520,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
"""Return empty dict when IMDS network metadata is absent."""
httpretty.register_uri(
httpretty.GET,
- dsaz.IMDS_URL + 'instance?api-version=2017-12-01',
+ dsaz.IMDS_URL + '/instance?api-version=2017-12-01',
body={}, status=404)
m_net_is_up.return_value = True # skips dhcp
@@ -614,7 +633,7 @@ scbus-1 on xpt0 bus 0
return dsaz
def _get_ds(self, data, agent_command=None, distro='ubuntu',
- apply_network=None):
+ apply_network=None, instance_id=None):
def dsdevs():
return data.get('dsdevs', [])
@@ -643,7 +662,10 @@ scbus-1 on xpt0 bus 0
self.m_ephemeral_dhcpv4 = mock.MagicMock()
self.m_ephemeral_dhcpv4_with_reporting = mock.MagicMock()
- self.instance_id = 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8'
+ if instance_id:
+ self.instance_id = instance_id
+ else:
+ self.instance_id = EXAMPLE_UUID
def _dmi_mocks(key):
if key == 'system-uuid':
@@ -894,7 +916,7 @@ scbus-1 on xpt0 bus 0
'azure_data': {
'configurationsettype': 'LinuxProvisioningConfiguration'},
'imds': NETWORK_METADATA,
- 'instance-id': 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8',
+ 'instance-id': EXAMPLE_UUID,
'local-hostname': u'myhost',
'random_seed': 'wild'}
@@ -1139,6 +1161,30 @@ scbus-1 on xpt0 bus 0
dsrc.get_data()
self.assertEqual(expected_network_config, dsrc.network_config)
+ @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
+ return_value=None)
+ def test_network_config_set_from_imds_for_secondary_nic_no_ip(
+ self, m_driver):
+ """If an IP address is empty then there should no config for it."""
+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ odata = {}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': sys_cfg}
+ expected_network_config = {
+ 'ethernets': {
+ 'eth0': {'set-name': 'eth0',
+ 'match': {'macaddress': '00:0d:3a:04:75:98'},
+ 'dhcp6': False,
+ 'dhcp4': True,
+ 'dhcp4-overrides': {'route-metric': 100}}},
+ 'version': 2}
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data['network']['interface'].append(SECONDARY_INTERFACE_NO_IP)
+ self.m_get_metadata_from_imds.return_value = imds_data
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(expected_network_config, dsrc.network_config)
+
def test_availability_zone_set_from_imds(self):
"""Datasource.availability returns IMDS platformFaultDomain."""
sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
@@ -1310,23 +1356,51 @@ scbus-1 on xpt0 bus 0
for mypk in mypklist:
self.assertIn(mypk['value'], dsrc.metadata['public-keys'])
- def test_default_ephemeral(self):
- # make sure the ephemeral device works
+ def test_default_ephemeral_configs_ephemeral_exists(self):
+ # make sure the ephemeral configs are correct if disk present
odata = {}
data = {'ovfcontent': construct_valid_ovf_env(data=odata),
'sys_cfg': {}}
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- cfg = dsrc.get_config_obj()
+ orig_exists = dsaz.os.path.exists
+
+ def changed_exists(path):
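+ # Report the Azure resource (ephemeral) disk as present; defer
+ # to the real os.path.exists for every other path.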
+ return True if path == dsaz.RESOURCE_DISK_PATH else orig_exists(
+ path)
- self.assertEqual(dsrc.device_name_to_device("ephemeral0"),
- dsaz.RESOURCE_DISK_PATH)
- assert 'disk_setup' in cfg
- assert 'fs_setup' in cfg
- self.assertIsInstance(cfg['disk_setup'], dict)
- self.assertIsInstance(cfg['fs_setup'], list)
+ with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists):
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ cfg = dsrc.get_config_obj()
+
+ self.assertEqual(dsrc.device_name_to_device("ephemeral0"),
+ dsaz.RESOURCE_DISK_PATH)
+ assert 'disk_setup' in cfg
+ assert 'fs_setup' in cfg
+ self.assertIsInstance(cfg['disk_setup'], dict)
+ self.assertIsInstance(cfg['fs_setup'], list)
+
+ def test_default_ephemeral_configs_ephemeral_does_not_exist(self):
+ # make sure the ephemeral configs are correct if disk not present
+ odata = {}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': {}}
+
+ orig_exists = dsaz.os.path.exists
+
+ def changed_exists(path):
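+ # Report the Azure resource (ephemeral) disk as absent; defer
+ # to the real os.path.exists for every other path.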
+ return False if path == dsaz.RESOURCE_DISK_PATH else orig_exists(
+ path)
+
+ with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists):
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ cfg = dsrc.get_config_obj()
+
+ assert 'disk_setup' not in cfg
+ assert 'fs_setup' not in cfg
def test_provide_disk_aliases(self):
# Make sure that user can affect disk aliases
@@ -1573,6 +1647,32 @@ scbus-1 on xpt0 bus 0
self.assertTrue(ret)
self.assertEqual('value', dsrc.metadata['test'])
+ def test_instance_id_case_insensitive(self):
+ """Return the previous iid when current is a case-insensitive match."""
+ lower_iid = EXAMPLE_UUID.lower()
+ upper_iid = EXAMPLE_UUID.upper()
+ # lowercase current UUID
+ ds = self._get_ds(
+ {'ovfcontent': construct_valid_ovf_env()}, instance_id=lower_iid
+ )
+ # UPPERCASE previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
+ upper_iid)
+ ds.get_data()
+ self.assertEqual(upper_iid, ds.metadata['instance-id'])
+
+ # UPPERCASE current UUID
+ ds = self._get_ds(
+ {'ovfcontent': construct_valid_ovf_env()}, instance_id=upper_iid
+ )
+ # lowercase previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
+ lower_iid)
+ ds.get_data()
+ self.assertEqual(lower_iid, ds.metadata['instance-id'])
+
def test_instance_id_endianness(self):
"""Return the previous iid when dmi uuid is the byteswapped iid."""
ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
@@ -1588,8 +1688,7 @@ scbus-1 on xpt0 bus 0
os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
'644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8')
ds.get_data()
- self.assertEqual(
- 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', ds.metadata['instance-id'])
+ self.assertEqual(self.instance_id, ds.metadata['instance-id'])
def test_instance_id_from_dmidecode_used(self):
ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
@@ -1757,7 +1856,9 @@ scbus-1 on xpt0 bus 0
dsrc.get_data()
dsrc.setup(True)
ssh_keys = dsrc.get_public_ssh_keys()
- self.assertEqual(ssh_keys, ['key1'])
+ # Temporary workaround for a regression: SSH public keys from
+ # IMDS are deliberately not used, so expect an empty list here.
+ self.assertEqual(ssh_keys, [])
self.assertEqual(m_parse_certificates.call_count, 0)
@mock.patch(MOCKPATH + 'get_metadata_from_imds')
@@ -1778,6 +1879,40 @@ scbus-1 on xpt0 bus 0
ssh_keys = dsrc.get_public_ssh_keys()
self.assertEqual(ssh_keys, ['key2'])
+ @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ def test_imds_api_version_wanted_nonexistent(
+ self,
+ m_get_metadata_from_imds):
+ def get_metadata_from_imds_side_eff(*args, **kwargs):
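+ # Reject the wanted IMDS API version with an HTTP 400 so the
+ # datasource has to fall back to an older supported version.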
+ if kwargs['api_version'] == dsaz.IMDS_VER_WANT:
+ raise url_helper.UrlError("No IMDS version", code=400)
+ return NETWORK_METADATA
+ m_get_metadata_from_imds.side_effect = get_metadata_from_imds_side_eff
+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {
+ 'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': sys_cfg
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertIsNotNone(dsrc.metadata)
+ self.assertTrue(dsrc.failed_desired_api_version)
+
+ @mock.patch(
+ MOCKPATH + 'get_metadata_from_imds', return_value=NETWORK_METADATA)
+ def test_imds_api_version_wanted_exists(self, m_get_metadata_from_imds):
+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {
+ 'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': sys_cfg
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertIsNotNone(dsrc.metadata)
+ self.assertFalse(dsrc.failed_desired_api_version)
+
class TestAzureBounce(CiTestCase):
@@ -2558,7 +2693,7 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
@mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up')
@mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event')
@mock.patch('cloudinit.sources.net.find_fallback_nic')
- @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback')
@mock.patch(MOCKPATH + 'EphemeralDHCPv4')
@mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
@mock.patch('os.path.isfile')
diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py
index 4ab5d471..5912f7ee 100644
--- a/tests/unittests/test_datasource/test_common.py
+++ b/tests/unittests/test_datasource/test_common.py
@@ -27,6 +27,7 @@ from cloudinit.sources import (
DataSourceRbxCloud as RbxCloud,
DataSourceScaleway as Scaleway,
DataSourceSmartOS as SmartOS,
+ DataSourceUpCloud as UpCloud,
)
from cloudinit.sources import DataSourceNone as DSNone
@@ -48,6 +49,7 @@ DEFAULT_LOCAL = [
OpenStack.DataSourceOpenStackLocal,
RbxCloud.DataSourceRbxCloud,
Scaleway.DataSourceScaleway,
+ UpCloud.DataSourceUpCloudLocal,
]
DEFAULT_NETWORK = [
@@ -63,6 +65,7 @@ DEFAULT_NETWORK = [
NoCloud.DataSourceNoCloudNet,
OpenStack.DataSourceOpenStack,
OVF.DataSourceOVFNet,
+ UpCloud.DataSourceUpCloud,
]
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 6f830cc6..2e2b7847 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -494,6 +494,10 @@ class TestConfigDriveDataSource(CiTestCase):
self.assertEqual('config-disk (/dev/anything)', cfg_ds.subplatform)
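+# Treat every interface as non-OVS so network JSON conversion does not
+# probe for Open vSwitch tooling on the test host.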
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False)
+)
class TestNetJson(CiTestCase):
def setUp(self):
super(TestNetJson, self).setUp()
@@ -654,6 +658,10 @@ class TestNetJson(CiTestCase):
self.assertEqual(out_data, conv_data)
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False)
+)
class TestConvertNetworkData(CiTestCase):
with_logs = True
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index 415755aa..478f3503 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -40,6 +40,9 @@ USER_DATA = b'#!/bin/sh\necho This is user data\n'
VENDOR_DATA = {
'magic': '',
}
+VENDOR_DATA2 = {
+ 'static': {}
+}
OSTACK_META = {
'availability_zone': 'nova',
'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
@@ -60,6 +63,7 @@ OS_FILES = {
{'links': [], 'networks': [], 'services': []}),
'openstack/latest/user_data': USER_DATA,
'openstack/latest/vendor_data.json': json.dumps(VENDOR_DATA),
+ 'openstack/latest/vendor_data2.json': json.dumps(VENDOR_DATA2),
}
EC2_FILES = {
'latest/user-data': USER_DATA,
@@ -142,6 +146,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
_register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
f = _read_metadata_service()
self.assertEqual(VENDOR_DATA, f.get('vendordata'))
+ self.assertEqual(VENDOR_DATA2, f.get('vendordata2'))
self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
self.assertEqual(2, len(f['files']))
@@ -163,6 +168,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
_register_uris(self.VERSION, {}, {}, OS_FILES)
f = _read_metadata_service()
self.assertEqual(VENDOR_DATA, f.get('vendordata'))
+ self.assertEqual(VENDOR_DATA2, f.get('vendordata2'))
self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
self.assertEqual(USER_DATA, f.get('userdata'))
@@ -195,6 +201,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
_register_uris(self.VERSION, {}, {}, os_files)
f = _read_metadata_service()
self.assertEqual(VENDOR_DATA, f.get('vendordata'))
+ self.assertEqual(VENDOR_DATA2, f.get('vendordata2'))
self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
self.assertFalse(f.get('userdata'))
@@ -210,6 +217,17 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
self.assertFalse(f.get('vendordata'))
+ def test_vendordata2_empty(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith('vendor_data2.json'):
+ os_files.pop(k, None)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ f = _read_metadata_service()
+ self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
+ self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
+ self.assertFalse(f.get('vendordata2'))
+
def test_vendordata_invalid(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
@@ -218,6 +236,14 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
_register_uris(self.VERSION, {}, {}, os_files)
self.assertRaises(BrokenMetadata, _read_metadata_service)
+ def test_vendordata2_invalid(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith('vendor_data2.json'):
+ os_files[k] = '{' # some invalid json
+ _register_uris(self.VERSION, {}, {}, os_files)
+ self.assertRaises(BrokenMetadata, _read_metadata_service)
+
def test_metadata_invalid(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
@@ -246,6 +272,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertEqual(USER_DATA, ds_os.userdata_raw)
self.assertEqual(2, len(ds_os.files))
self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure)
+ self.assertEqual(VENDOR_DATA2, ds_os.vendordata2_pure)
self.assertIsNone(ds_os.vendordata_raw)
m_dhcp.assert_not_called()
@@ -278,6 +305,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertEqual(USER_DATA, ds_os_local.userdata_raw)
self.assertEqual(2, len(ds_os_local.files))
self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure)
+ self.assertEqual(VENDOR_DATA2, ds_os_local.vendordata2_pure)
self.assertIsNone(ds_os_local.vendordata_raw)
m_dhcp.assert_called_with('eth9', None)
@@ -401,7 +429,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertIsNone(ds_os.vendordata_raw)
self.assertEqual(
['dsmode', 'ec2-metadata', 'files', 'metadata', 'networkdata',
- 'userdata', 'vendordata', 'version'],
+ 'userdata', 'vendordata', 'vendordata2', 'version'],
sorted(crawled_data.keys()))
self.assertEqual('local', crawled_data['dsmode'])
self.assertEqual(EC2_META, crawled_data['ec2-metadata'])
@@ -415,6 +443,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
crawled_data['networkdata'])
self.assertEqual(USER_DATA, crawled_data['userdata'])
self.assertEqual(VENDOR_DATA, crawled_data['vendordata'])
+ self.assertEqual(VENDOR_DATA2, crawled_data['vendordata2'])
self.assertEqual(2, crawled_data['version'])
@@ -681,6 +710,7 @@ class TestMetadataReader(test_helpers.HttprettyTestCase):
'version': 2,
'metadata': expected_md,
'vendordata': vendor_data,
+ 'vendordata2': vendor_data2,
'networkdata': network_data,
'ec2-metadata': mock_read_ec2.return_value,
'files': {},
diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py
index 16773de5..dce01f5d 100644
--- a/tests/unittests/test_datasource/test_ovf.py
+++ b/tests/unittests/test_datasource/test_ovf.py
@@ -17,6 +17,7 @@ from cloudinit.helpers import Paths
from cloudinit.sources import DataSourceOVF as dsovf
from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
CustomScriptNotFound)
+from cloudinit.safeyaml import YAMLError
MPATH = 'cloudinit.sources.DataSourceOVF.'
@@ -138,16 +139,29 @@ class TestDatasourceOVF(CiTestCase):
'DEBUG: No system-product-name found', self.logs.getvalue())
def test_get_data_no_vmware_customization_disabled(self):
- """When vmware customization is disabled via sys_cfg log a message."""
+ """When cloud-init workflow for vmware is disabled via sys_cfg and
+ no meta data provided, log a message.
+ """
paths = Paths({'cloud_dir': self.tdir})
ds = self.datasource(
sys_cfg={'disable_vmware_customization': True}, distro={},
paths=paths)
+ conf_file = self.tmp_path('test-cust', self.tdir)
+ conf_content = dedent("""\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345345
+ """)
+ util.write_file(conf_file, conf_content)
retcode = wrap_and_call(
'cloudinit.sources.DataSourceOVF',
{'dmi.read_dmi_data': 'vmware',
'transport_iso9660': NOT_FOUND,
- 'transport_vmware_guestinfo': NOT_FOUND},
+ 'transport_vmware_guestinfo': NOT_FOUND,
+ 'util.del_dir': True,
+ 'search_file': self.tdir,
+ 'wait_for_imc_cfg_file': conf_file},
ds.get_data)
self.assertFalse(retcode, 'Expected False return from ds.get_data')
self.assertIn(
@@ -344,6 +358,279 @@ class TestDatasourceOVF(CiTestCase):
'vmware (%s/seed/ovf-env.xml)' % self.tdir,
ds.subplatform)
+ def test_get_data_cloudinit_metadata_json(self):
+ """Test metadata can be loaded to cloud-init metadata and network.
+ The metadata format is json.
+ """
+ paths = Paths({'cloud_dir': self.tdir})
+ ds = self.datasource(
+ sys_cfg={'disable_vmware_customization': True}, distro={},
+ paths=paths)
+ # Prepare the conf file
+ conf_file = self.tmp_path('test-cust', self.tdir)
+ conf_content = dedent("""\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """)
+ util.write_file(conf_file, conf_content)
+ # Prepare the meta data file
+ metadata_file = self.tmp_path('test-meta', self.tdir)
+ metadata_content = dedent("""\
+ {
+ "instance-id": "cloud-vm",
+ "local-hostname": "my-host.domain.com",
+ "network": {
+ "version": 2,
+ "ethernets": {
+ "eths": {
+ "match": {
+ "name": "ens*"
+ },
+ "dhcp4": true
+ }
+ }
+ }
+ }
+ """)
+ util.write_file(metadata_file, metadata_content)
+
+ with mock.patch(MPATH + 'set_customization_status',
+ return_value=('msg', b'')):
+ result = wrap_and_call(
+ 'cloudinit.sources.DataSourceOVF',
+ {'dmi.read_dmi_data': 'vmware',
+ 'util.del_dir': True,
+ 'search_file': self.tdir,
+ 'wait_for_imc_cfg_file': conf_file,
+ 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''],
+ 'get_nics_to_enable': ''},
+ ds._get_data)
+
+ self.assertTrue(result)
+ self.assertEqual("cloud-vm", ds.metadata['instance-id'])
+ self.assertEqual("my-host.domain.com", ds.metadata['local-hostname'])
+ self.assertEqual(2, ds.network_config['version'])
+ self.assertTrue(ds.network_config['ethernets']['eths']['dhcp4'])
+
+ def test_get_data_cloudinit_metadata_yaml(self):
+ """Test metadata can be loaded to cloud-init metadata and network.
+ The metadata format is yaml.
+ """
+ paths = Paths({'cloud_dir': self.tdir})
+ ds = self.datasource(
+ sys_cfg={'disable_vmware_customization': True}, distro={},
+ paths=paths)
+ # Prepare the conf file
+ conf_file = self.tmp_path('test-cust', self.tdir)
+ conf_content = dedent("""\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """)
+ util.write_file(conf_file, conf_content)
+ # Prepare the meta data file
+ metadata_file = self.tmp_path('test-meta', self.tdir)
+ metadata_content = dedent("""\
+ instance-id: cloud-vm
+ local-hostname: my-host.domain.com
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+ """)
+ util.write_file(metadata_file, metadata_content)
+
+ with mock.patch(MPATH + 'set_customization_status',
+ return_value=('msg', b'')):
+ result = wrap_and_call(
+ 'cloudinit.sources.DataSourceOVF',
+ {'dmi.read_dmi_data': 'vmware',
+ 'util.del_dir': True,
+ 'search_file': self.tdir,
+ 'wait_for_imc_cfg_file': conf_file,
+ 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''],
+ 'get_nics_to_enable': ''},
+ ds._get_data)
+
+ self.assertTrue(result)
+ self.assertEqual("cloud-vm", ds.metadata['instance-id'])
+ self.assertEqual("my-host.domain.com", ds.metadata['local-hostname'])
+ self.assertEqual(2, ds.network_config['version'])
+ self.assertTrue(ds.network_config['ethernets']['nics']['dhcp4'])
+
+ def test_get_data_cloudinit_metadata_not_valid(self):
+ """Test metadata is not JSON or YAML format.
+ """
+ paths = Paths({'cloud_dir': self.tdir})
+ ds = self.datasource(
+ sys_cfg={'disable_vmware_customization': True}, distro={},
+ paths=paths)
+
+ # Prepare the conf file
+ conf_file = self.tmp_path('test-cust', self.tdir)
+ conf_content = dedent("""\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """)
+ util.write_file(conf_file, conf_content)
+
+ # Prepare the meta data file
+ metadata_file = self.tmp_path('test-meta', self.tdir)
+ metadata_content = "[This is not json or yaml format]a=b"
+ util.write_file(metadata_file, metadata_content)
+
+ with mock.patch(MPATH + 'set_customization_status',
+ return_value=('msg', b'')):
+ with self.assertRaises(YAMLError) as context:
+ wrap_and_call(
+ 'cloudinit.sources.DataSourceOVF',
+ {'dmi.read_dmi_data': 'vmware',
+ 'util.del_dir': True,
+ 'search_file': self.tdir,
+ 'wait_for_imc_cfg_file': conf_file,
+ 'collect_imc_file_paths': [
+ self.tdir + '/test-meta', '', ''
+ ],
+ 'get_nics_to_enable': ''},
+ ds.get_data)
+
+ self.assertIn("expected '<document start>', but found '<scalar>'",
+ str(context.exception))
+
+ def test_get_data_cloudinit_metadata_not_found(self):
+ """Test metadata file can't be found.
+ """
+ paths = Paths({'cloud_dir': self.tdir})
+ ds = self.datasource(
+ sys_cfg={'disable_vmware_customization': True}, distro={},
+ paths=paths)
+ # Prepare the conf file
+ conf_file = self.tmp_path('test-cust', self.tdir)
+ conf_content = dedent("""\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """)
+ util.write_file(conf_file, conf_content)
+ # Don't prepare the meta data file
+
+ with mock.patch(MPATH + 'set_customization_status',
+ return_value=('msg', b'')):
+ with self.assertRaises(FileNotFoundError) as context:
+ wrap_and_call(
+ 'cloudinit.sources.DataSourceOVF',
+ {'dmi.read_dmi_data': 'vmware',
+ 'util.del_dir': True,
+ 'search_file': self.tdir,
+ 'wait_for_imc_cfg_file': conf_file,
+ 'get_nics_to_enable': ''},
+ ds.get_data)
+
+ self.assertIn('is not found', str(context.exception))
+
+ def test_get_data_cloudinit_userdata(self):
+ """Test user data can be loaded to cloud-init user data.
+ """
+ paths = Paths({'cloud_dir': self.tdir})
+ ds = self.datasource(
+ sys_cfg={'disable_vmware_customization': False}, distro={},
+ paths=paths)
+
+ # Prepare the conf file
+ conf_file = self.tmp_path('test-cust', self.tdir)
+ conf_content = dedent("""\
+ [CLOUDINIT]
+ METADATA = test-meta
+ USERDATA = test-user
+ """)
+ util.write_file(conf_file, conf_content)
+
+ # Prepare the meta data file
+ metadata_file = self.tmp_path('test-meta', self.tdir)
+ metadata_content = dedent("""\
+ instance-id: cloud-vm
+ local-hostname: my-host.domain.com
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+ """)
+ util.write_file(metadata_file, metadata_content)
+
+ # Prepare the user data file
+ userdata_file = self.tmp_path('test-user', self.tdir)
+ userdata_content = "This is the user data"
+ util.write_file(userdata_file, userdata_content)
+
+ with mock.patch(MPATH + 'set_customization_status',
+ return_value=('msg', b'')):
+ result = wrap_and_call(
+ 'cloudinit.sources.DataSourceOVF',
+ {'dmi.read_dmi_data': 'vmware',
+ 'util.del_dir': True,
+ 'search_file': self.tdir,
+ 'wait_for_imc_cfg_file': conf_file,
+ 'collect_imc_file_paths': [self.tdir + '/test-meta',
+ self.tdir + '/test-user', ''],
+ 'get_nics_to_enable': ''},
+ ds._get_data)
+
+ self.assertTrue(result)
+ self.assertEqual("cloud-vm", ds.metadata['instance-id'])
+ self.assertEqual(userdata_content, ds.userdata_raw)
+
+ def test_get_data_cloudinit_userdata_not_found(self):
+ """Test userdata file can't be found.
+ """
+ paths = Paths({'cloud_dir': self.tdir})
+ ds = self.datasource(
+ sys_cfg={'disable_vmware_customization': True}, distro={},
+ paths=paths)
+
+ # Prepare the conf file
+ conf_file = self.tmp_path('test-cust', self.tdir)
+ conf_content = dedent("""\
+ [CLOUDINIT]
+ METADATA = test-meta
+ USERDATA = test-user
+ """)
+ util.write_file(conf_file, conf_content)
+
+ # Prepare the meta data file
+ metadata_file = self.tmp_path('test-meta', self.tdir)
+ metadata_content = dedent("""\
+ instance-id: cloud-vm
+ local-hostname: my-host.domain.com
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+ """)
+ util.write_file(metadata_file, metadata_content)
+
+ # Don't prepare the user data file
+
+ with mock.patch(MPATH + 'set_customization_status',
+ return_value=('msg', b'')):
+ with self.assertRaises(FileNotFoundError) as context:
+ wrap_and_call(
+ 'cloudinit.sources.DataSourceOVF',
+ {'dmi.read_dmi_data': 'vmware',
+ 'util.del_dir': True,
+ 'search_file': self.tdir,
+ 'wait_for_imc_cfg_file': conf_file,
+ 'get_nics_to_enable': ''},
+ ds.get_data)
+
+ self.assertIn('is not found', str(context.exception))
+
class TestTransportIso9660(CiTestCase):
diff --git a/tests/unittests/test_datasource/test_upcloud.py b/tests/unittests/test_datasource/test_upcloud.py
new file mode 100644
index 00000000..cec48b4b
--- /dev/null
+++ b/tests/unittests/test_datasource/test_upcloud.py
@@ -0,0 +1,314 @@
+# Author: Antti Myyrä <antti.myyra@upcloud.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import helpers
+from cloudinit import settings
+from cloudinit import sources
+from cloudinit.sources.DataSourceUpCloud import DataSourceUpCloud, \
+ DataSourceUpCloudLocal
+
+from cloudinit.tests.helpers import mock, CiTestCase
+
+UC_METADATA = json.loads("""
+{
+ "cloud_name": "upcloud",
+ "instance_id": "00322b68-0096-4042-9406-faad61922128",
+ "hostname": "test.example.com",
+ "platform": "servers",
+ "subplatform": "metadata (http://169.254.169.254)",
+ "public_keys": [
+ "ssh-rsa AAAAB.... test1@example.com",
+ "ssh-rsa AAAAB.... test2@example.com"
+ ],
+ "region": "fi-hel2",
+ "network": {
+ "interfaces": [
+ {
+ "index": 1,
+ "ip_addresses": [
+ {
+ "address": "94.237.105.53",
+ "dhcp": true,
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ],
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "94.237.104.1",
+ "network": "94.237.104.0/22"
+ },
+ {
+ "address": "94.237.105.50",
+ "dhcp": false,
+ "dns": null,
+ "family": "IPv4",
+ "floating": true,
+ "gateway": "",
+ "network": "94.237.105.50/32"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:36:e7",
+ "network_id": "031457f4-0f8c-483c-96f2-eccede02909c",
+ "type": "public"
+ },
+ {
+ "index": 2,
+ "ip_addresses": [
+ {
+ "address": "10.6.3.27",
+ "dhcp": true,
+ "dns": null,
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "10.6.0.1",
+ "network": "10.6.0.0/22"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:84:cc",
+ "network_id": "03d82553-5bea-4132-b29a-e1cf67ec2dd1",
+ "type": "utility"
+ },
+ {
+ "index": 3,
+ "ip_addresses": [
+ {
+ "address": "2a04:3545:1000:720:38d6:baff:fe4a:63e7",
+ "dhcp": true,
+ "dns": [
+ "2a04:3540:53::1",
+ "2a04:3544:53::1"
+ ],
+ "family": "IPv6",
+ "floating": false,
+ "gateway": "2a04:3545:1000:720::1",
+ "network": "2a04:3545:1000:720::/64"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:63:e7",
+ "network_id": "03000000-0000-4000-8046-000000000000",
+ "type": "public"
+ },
+ {
+ "index": 4,
+ "ip_addresses": [
+ {
+ "address": "172.30.1.10",
+ "dhcp": true,
+ "dns": null,
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "172.30.1.1",
+ "network": "172.30.1.0/24"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:8a:e1",
+ "network_id": "035a0a4a-7704-4de5-820d-189fc8132714",
+ "type": "private"
+ }
+ ],
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ]
+ },
+ "storage": {
+ "disks": [
+ {
+ "id": "014efb65-223b-4d44-8f0a-c29535b88dcf",
+ "serial": "014efb65223b4d448f0a",
+ "size": 10240,
+ "type": "disk",
+ "tier": "maxiops"
+ }
+ ]
+ },
+ "tags": [],
+ "user_data": "",
+ "vendor_data": ""
+}
+""")
+
+UC_METADATA["user_data"] = b"""#cloud-config
+runcmd:
+- [touch, /root/cloud-init-worked ]
+"""
+
+MD_URL = 'http://169.254.169.254/metadata/v1.json'
+
+
+def _mock_dmi():
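+ # Pretend we are running on UpCloud: the sysinfo check succeeds and
+ # returns the same instance UUID as UC_METADATA above.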
+ return True, "00322b68-0096-4042-9406-faad61922128"
+
+
+class TestUpCloudMetadata(CiTestCase):
+ """
+ Test reading the meta-data
+ """
+ def setUp(self):
+ super(TestUpCloudMetadata, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def get_ds(self, get_sysinfo=_mock_dmi):
+ ds = DataSourceUpCloud(
+ settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ if get_sysinfo:
+ ds._get_sysinfo = get_sysinfo
+ return ds
+
+ @mock.patch('cloudinit.sources.helpers.upcloud.read_sysinfo')
+ def test_returns_false_not_on_upcloud(self, m_read_sysinfo):
+ m_read_sysinfo.return_value = (False, None)
+ ds = self.get_ds(get_sysinfo=None)
+ self.assertEqual(False, ds.get_data())
+ self.assertTrue(m_read_sysinfo.called)
+
+ @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata')
+ def test_metadata(self, mock_readmd):
+ mock_readmd.return_value = UC_METADATA.copy()
+
+ ds = self.get_ds()
+ ds.perform_dhcp_setup = False
+
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(mock_readmd.called)
+
+ self.assertEqual(UC_METADATA.get('user_data'), ds.get_userdata_raw())
+ self.assertEqual(UC_METADATA.get('vendor_data'),
+ ds.get_vendordata_raw())
+ self.assertEqual(UC_METADATA.get('region'), ds.availability_zone)
+ self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id())
+ self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name)
+
+ self.assertEqual(UC_METADATA.get('public_keys'),
+ ds.get_public_ssh_keys())
+ self.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+
+class TestUpCloudNetworkSetup(CiTestCase):
+ """
+ Test reading the metadata in a networked context
+ """
+
+ def setUp(self):
+ super(TestUpCloudNetworkSetup, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def get_ds(self, get_sysinfo=_mock_dmi):
+ ds = DataSourceUpCloudLocal(
+ settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ if get_sysinfo:
+ ds._get_sysinfo = get_sysinfo
+ return ds
+
+ @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata')
+ @mock.patch('cloudinit.net.find_fallback_nic')
+ @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+ @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
+ def test_network_configured_metadata(self, m_net, m_dhcp,
+ m_fallback_nic, mock_readmd):
+ mock_readmd.return_value = UC_METADATA.copy()
+
+ m_fallback_nic.return_value = 'eth1'
+ m_dhcp.return_value = [{
+ 'interface': 'eth1', 'fixed-address': '10.6.3.27',
+ 'routers': '10.6.0.1', 'subnet-mask': '22',
+ 'broadcast-address': '10.6.3.255'}
+ ]
+
+ ds = self.get_ds()
+
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(m_dhcp.called)
+ m_dhcp.assert_called_with('eth1', None)
+
+ m_net.assert_called_once_with(
+ broadcast='10.6.3.255', interface='eth1',
+ ip='10.6.3.27', prefix_or_mask='22',
+ router='10.6.0.1', static_routes=None
+ )
+
+ self.assertTrue(mock_readmd.called)
+
+ self.assertEqual(UC_METADATA.get('region'), ds.availability_zone)
+ self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id())
+ self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name)
+
+ @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata')
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ def test_network_configuration(self, m_get_by_mac, mock_readmd):
+ mock_readmd.return_value = UC_METADATA.copy()
+
+ raw_ifaces = UC_METADATA.get('network').get('interfaces')
+ self.assertEqual(4, len(raw_ifaces))
+
+ m_get_by_mac.return_value = {
+ raw_ifaces[0].get('mac'): 'eth0',
+ raw_ifaces[1].get('mac'): 'eth1',
+ raw_ifaces[2].get('mac'): 'eth2',
+ raw_ifaces[3].get('mac'): 'eth3',
+ }
+
+ ds = self.get_ds()
+ ds.perform_dhcp_setup = False
+
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(mock_readmd.called)
+
+ netcfg = ds.network_config
+
+ self.assertEqual(1, netcfg.get('version'))
+
+ config = netcfg.get('config')
+ self.assertIsInstance(config, list)
+ self.assertEqual(5, len(config))
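+ # four physical NICs from the metadata plus one nameserver entry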
+ self.assertEqual('physical', config[3].get('type'))
+
+ self.assertEqual(raw_ifaces[2].get('mac'), config[2]
+ .get('mac_address'))
+ self.assertEqual(1, len(config[2].get('subnets')))
+ self.assertEqual('ipv6_dhcpv6-stateless', config[2].get('subnets')[0]
+ .get('type'))
+
+ self.assertEqual(2, len(config[0].get('subnets')))
+ self.assertEqual('static', config[0].get('subnets')[1].get('type'))
+
+ dns = config[4]
+ self.assertEqual('nameserver', dns.get('type'))
+ self.assertEqual(2, len(dns.get('address')))
+ self.assertEqual(
+ UC_METADATA.get('network').get('dns')[1],
+ dns.get('address')[1]
+ )
+
+
+class TestUpCloudDatasourceLoading(CiTestCase):
+ def test_get_datasource_list_returns_in_local(self):
+ deps = (sources.DEP_FILESYSTEM, )
+ ds_list = sources.DataSourceUpCloud.get_datasource_list(deps)
+ self.assertEqual(ds_list,
+ [DataSourceUpCloudLocal])
+
+ def test_get_datasource_list_returns_in_normal(self):
+ deps = (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)
+ ds_list = sources.DataSourceUpCloud.get_datasource_list(deps)
+ self.assertEqual(ds_list,
+ [DataSourceUpCloud])
+
+ def test_list_sources_finds_ds(self):
+ found = sources.list_sources(
+ ['UpCloud'], (sources.DEP_FILESYSTEM, sources.DEP_NETWORK),
+ ['cloudinit.sources'])
+ self.assertEqual([DataSourceUpCloud],
+ found)
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py
index 44607489..336150bc 100644
--- a/tests/unittests/test_distros/test_generic.py
+++ b/tests/unittests/test_distros/test_generic.py
@@ -119,6 +119,19 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
self.assertIn("josh", contents)
self.assertEqual(2, contents.count("josh"))
+ def test_sudoers_ensure_only_one_includedir(self):
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ self.patchOS(self.tmp)
+ self.patchUtils(self.tmp)
+ for char in ['#', '@']:
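+ # sudo accepts both '#includedir' and '@includedir'; neither
+ # spelling may end up duplicated after ensure_sudo_dir runs.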
+ util.write_file("/etc/sudoers", "{}includedir /b".format(char))
+ d.ensure_sudo_dir("/b")
+ contents = util.load_file("/etc/sudoers")
+ self.assertIn("includedir /b", contents)
+ self.assertTrue(os.path.isdir("/b"))
+ self.assertEqual(1, contents.count("includedir /b"))
+
def test_arch_package_mirror_info_unknown(self):
"""for an unknown arch, we should get back that with arch 'default'."""
arch_mirrors = gapmi(package_mirrors, arch="unknown")
diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py
index e74a0a08..6e3831ed 100644
--- a/tests/unittests/test_handler/test_handler_ca_certs.py
+++ b/tests/unittests/test_handler/test_handler_ca_certs.py
@@ -47,12 +47,20 @@ class TestConfig(TestCase):
def setUp(self):
super(TestConfig, self).setUp()
self.name = "ca-certs"
- distro = self._fetch_distro('ubuntu')
self.paths = None
- self.cloud = cloud.Cloud(None, self.paths, None, distro, None)
self.log = logging.getLogger("TestNoConfig")
self.args = []
+ def _fetch_distro(self, kind):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({})
+ return cls(kind, {}, paths)
+
+ def _get_cloud(self, kind):
+ distro = self._fetch_distro(kind)
+ return cloud.Cloud(None, self.paths, None, distro, None)
+
+ def _mock_init(self):
self.mocks = ExitStack()
self.addCleanup(self.mocks.close)
@@ -64,11 +72,6 @@ class TestConfig(TestCase):
self.mock_remove = self.mocks.enter_context(
mock.patch.object(cc_ca_certs, 'remove_default_ca_certs'))
- def _fetch_distro(self, kind):
- cls = distros.fetch(kind)
- paths = helpers.Paths({})
- return cls(kind, {}, paths)
-
def test_no_trusted_list(self):
"""
Test that no certificates are written if the 'trusted' key is not
@@ -76,71 +79,95 @@ class TestConfig(TestCase):
"""
config = {"ca-certs": {}}
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = self._get_cloud(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
- self.assertEqual(self.mock_add.call_count, 0)
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
+ self.assertEqual(self.mock_add.call_count, 0)
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 0)
def test_empty_trusted_list(self):
"""Test that no certificate are written if 'trusted' list is empty."""
config = {"ca-certs": {"trusted": []}}
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = self._get_cloud(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
- self.assertEqual(self.mock_add.call_count, 0)
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
+ self.assertEqual(self.mock_add.call_count, 0)
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 0)
def test_single_trusted(self):
"""Test that a single cert gets passed to add_ca_certs."""
config = {"ca-certs": {"trusted": ["CERT1"]}}
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = self._get_cloud(distro_name)
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
- self.mock_add.assert_called_once_with(['CERT1'])
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
+ self.mock_add.assert_called_once_with(conf, ['CERT1'])
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 0)
def test_multiple_trusted(self):
"""Test that multiple certs get passed to add_ca_certs."""
config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}}
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = self._get_cloud(distro_name)
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
- self.mock_add.assert_called_once_with(['CERT1', 'CERT2'])
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
+ self.mock_add.assert_called_once_with(conf, ['CERT1', 'CERT2'])
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 0)
def test_remove_default_ca_certs(self):
"""Test remove_defaults works as expected."""
config = {"ca-certs": {"remove-defaults": True}}
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = self._get_cloud(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
- self.assertEqual(self.mock_add.call_count, 0)
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 1)
+ self.assertEqual(self.mock_add.call_count, 0)
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 1)
def test_no_remove_defaults_if_false(self):
"""Test remove_defaults is not called when config value is False."""
config = {"ca-certs": {"remove-defaults": False}}
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = self._get_cloud(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
- self.assertEqual(self.mock_add.call_count, 0)
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
+ self.assertEqual(self.mock_add.call_count, 0)
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 0)
def test_correct_order_for_remove_then_add(self):
"""Test remove_defaults is not called when config value is False."""
config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}}
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = self._get_cloud(distro_name)
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
- self.mock_add.assert_called_once_with(['CERT1'])
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 1)
+ self.mock_add.assert_called_once_with(conf, ['CERT1'])
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 1)
class TestAddCaCerts(TestCase):
@@ -152,12 +179,20 @@ class TestAddCaCerts(TestCase):
self.paths = helpers.Paths({
'cloud_dir': tmpdir,
})
+ self.add_patch("cloudinit.config.cc_ca_certs.os.stat", "m_stat")
+
+ def _fetch_distro(self, kind):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({})
+ return cls(kind, {}, paths)
def test_no_certs_in_list(self):
"""Test that no certificate are written if not provided."""
- with mock.patch.object(util, 'write_file') as mockobj:
- cc_ca_certs.add_ca_certs([])
- self.assertEqual(mockobj.call_count, 0)
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ with mock.patch.object(util, 'write_file') as mockobj:
+ cc_ca_certs.add_ca_certs(conf, [])
+ self.assertEqual(mockobj.call_count, 0)
def test_single_cert_trailing_cr(self):
"""Test adding a single certificate to the trusted CAs
@@ -167,20 +202,28 @@ class TestAddCaCerts(TestCase):
ca_certs_content = "line1\nline2\ncloud-init-ca-certs.crt\nline3\n"
expected = "line1\nline2\nline3\ncloud-init-ca-certs.crt\n"
- with ExitStack() as mocks:
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- mock_load = mocks.enter_context(
- mock.patch.object(util, 'load_file',
- return_value=ca_certs_content))
+ self.m_stat.return_value.st_size = 1
- cc_ca_certs.add_ca_certs([cert])
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
- mock_write.assert_has_calls([
- mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
- cert, mode=0o644),
- mock.call("/etc/ca-certificates.conf", expected, omode="wb")])
- mock_load.assert_called_once_with("/etc/ca-certificates.conf")
+ with ExitStack() as mocks:
+ mock_write = mocks.enter_context(
+ mock.patch.object(util, 'write_file'))
+ mock_load = mocks.enter_context(
+ mock.patch.object(util, 'load_file',
+ return_value=ca_certs_content))
+
+ cc_ca_certs.add_ca_certs(conf, [cert])
+
+ mock_write.assert_has_calls([
+ mock.call(conf['ca_cert_full_path'],
+ cert, mode=0o644)])
+ if conf['ca_cert_config'] is not None:
+ mock_write.assert_has_calls([
+ mock.call(conf['ca_cert_config'],
+ expected, omode="wb")])
+ mock_load.assert_called_once_with(conf['ca_cert_config'])
def test_single_cert_no_trailing_cr(self):
"""Test adding a single certificate to the trusted CAs
@@ -189,24 +232,31 @@ class TestAddCaCerts(TestCase):
ca_certs_content = "line1\nline2\nline3"
- with ExitStack() as mocks:
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- mock_load = mocks.enter_context(
- mock.patch.object(util, 'load_file',
- return_value=ca_certs_content))
+ self.m_stat.return_value.st_size = 1
+
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
- cc_ca_certs.add_ca_certs([cert])
+ with ExitStack() as mocks:
+ mock_write = mocks.enter_context(
+ mock.patch.object(util, 'write_file'))
+ mock_load = mocks.enter_context(
+ mock.patch.object(util, 'load_file',
+ return_value=ca_certs_content))
- mock_write.assert_has_calls([
- mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
- cert, mode=0o644),
- mock.call("/etc/ca-certificates.conf",
- "%s\n%s\n" % (ca_certs_content,
- "cloud-init-ca-certs.crt"),
- omode="wb")])
+ cc_ca_certs.add_ca_certs(conf, [cert])
- mock_load.assert_called_once_with("/etc/ca-certificates.conf")
+ mock_write.assert_has_calls([
+ mock.call(conf['ca_cert_full_path'],
+ cert, mode=0o644)])
+ if conf['ca_cert_config'] is not None:
+ mock_write.assert_has_calls([
+ mock.call(conf['ca_cert_config'],
+ "%s\n%s\n" % (ca_certs_content,
+ conf['ca_cert_filename']),
+ omode="wb")])
+
+ mock_load.assert_called_once_with(conf['ca_cert_config'])
def test_single_cert_to_empty_existing_ca_file(self):
"""Test adding a single certificate to the trusted CAs
@@ -215,20 +265,22 @@ class TestAddCaCerts(TestCase):
expected = "cloud-init-ca-certs.crt\n"
- with ExitStack() as mocks:
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file', autospec=True))
- mock_stat = mocks.enter_context(
- mock.patch("cloudinit.config.cc_ca_certs.os.stat")
- )
- mock_stat.return_value.st_size = 0
+ self.m_stat.return_value.st_size = 0
- cc_ca_certs.add_ca_certs([cert])
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ with mock.patch.object(util, 'write_file',
+ autospec=True) as m_write:
- mock_write.assert_has_calls([
- mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
- cert, mode=0o644),
- mock.call("/etc/ca-certificates.conf", expected, omode="wb")])
+ cc_ca_certs.add_ca_certs(conf, [cert])
+
+ m_write.assert_has_calls([
+ mock.call(conf['ca_cert_full_path'],
+ cert, mode=0o644)])
+ if conf['ca_cert_config'] is not None:
+ m_write.assert_has_calls([
+ mock.call(conf['ca_cert_config'],
+ expected, omode="wb")])
def test_multiple_certs(self):
"""Test adding multiple certificates to the trusted CAs."""
@@ -236,32 +288,41 @@ class TestAddCaCerts(TestCase):
expected_cert_file = "\n".join(certs)
ca_certs_content = "line1\nline2\nline3"
- with ExitStack() as mocks:
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- mock_load = mocks.enter_context(
- mock.patch.object(util, 'load_file',
- return_value=ca_certs_content))
+ self.m_stat.return_value.st_size = 1
+
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+
+ with ExitStack() as mocks:
+ mock_write = mocks.enter_context(
+ mock.patch.object(util, 'write_file'))
+ mock_load = mocks.enter_context(
+ mock.patch.object(util, 'load_file',
+ return_value=ca_certs_content))
- cc_ca_certs.add_ca_certs(certs)
+ cc_ca_certs.add_ca_certs(conf, certs)
- mock_write.assert_has_calls([
- mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
- expected_cert_file, mode=0o644),
- mock.call("/etc/ca-certificates.conf",
- "%s\n%s\n" % (ca_certs_content,
- "cloud-init-ca-certs.crt"),
- omode='wb')])
+ mock_write.assert_has_calls([
+ mock.call(conf['ca_cert_full_path'],
+ expected_cert_file, mode=0o644)])
+ if conf['ca_cert_config'] is not None:
+ mock_write.assert_has_calls([
+ mock.call(conf['ca_cert_config'],
+ "%s\n%s\n" % (ca_certs_content,
+ conf['ca_cert_filename']),
+ omode='wb')])
- mock_load.assert_called_once_with("/etc/ca-certificates.conf")
+ mock_load.assert_called_once_with(conf['ca_cert_config'])
class TestUpdateCaCerts(unittest.TestCase):
def test_commands(self):
- with mock.patch.object(subp, 'subp') as mockobj:
- cc_ca_certs.update_ca_certs()
- mockobj.assert_called_once_with(
- ["update-ca-certificates"], capture=False)
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ with mock.patch.object(subp, 'subp') as mockobj:
+ cc_ca_certs.update_ca_certs(conf)
+ mockobj.assert_called_once_with(
+ conf['ca_cert_update_cmd'], capture=False)
class TestRemoveDefaultCaCerts(TestCase):
@@ -275,24 +336,31 @@ class TestRemoveDefaultCaCerts(TestCase):
})
def test_commands(self):
- with ExitStack() as mocks:
- mock_delete = mocks.enter_context(
- mock.patch.object(util, 'delete_dir_contents'))
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- mock_subp = mocks.enter_context(mock.patch.object(subp, 'subp'))
-
- cc_ca_certs.remove_default_ca_certs('ubuntu')
-
- mock_delete.assert_has_calls([
- mock.call("/usr/share/ca-certificates/"),
- mock.call("/etc/ssl/certs/")])
-
- mock_write.assert_called_once_with(
- "/etc/ca-certificates.conf", "", mode=0o644)
-
- mock_subp.assert_called_once_with(
- ('debconf-set-selections', '-'),
- "ca-certificates ca-certificates/trust_new_crts select no")
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+
+ with ExitStack() as mocks:
+ mock_delete = mocks.enter_context(
+ mock.patch.object(util, 'delete_dir_contents'))
+ mock_write = mocks.enter_context(
+ mock.patch.object(util, 'write_file'))
+ mock_subp = mocks.enter_context(
+ mock.patch.object(subp, 'subp'))
+
+ cc_ca_certs.remove_default_ca_certs(distro_name, conf)
+
+ mock_delete.assert_has_calls([
+ mock.call(conf['ca_cert_path']),
+ mock.call(conf['ca_cert_system_path'])])
+
+ if conf['ca_cert_config'] is not None:
+ mock_write.assert_called_once_with(
+ conf['ca_cert_config'], "", mode=0o644)
+
+ if distro_name in ['debian', 'ubuntu']:
+ mock_subp.assert_called_once_with(
+ ('debconf-set-selections', '-'),
+ "ca-certificates \
+ca-certificates/trust_new_crts select no")
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py
index 47e7d804..15fe7b23 100644
--- a/tests/unittests/test_handler/test_handler_locale.py
+++ b/tests/unittests/test_handler/test_handler_locale.py
@@ -44,6 +44,29 @@ class TestLocale(t_help.FilesystemMockingTestCase):
cc = cloud.Cloud(ds, paths, {}, d, None)
return cc
+ def test_set_locale_arch(self):
+ locale = 'en_GB.UTF-8'
+ locale_configfile = '/etc/invalid-locale-path'
+ cfg = {
+ 'locale': locale,
+ 'locale_configfile': locale_configfile,
+ }
+ cc = self._get_cloud('arch')
+
+ with mock.patch('cloudinit.distros.arch.subp.subp') as m_subp:
+ with mock.patch('cloudinit.distros.arch.LOG.warning') as m_LOG:
+ cc_locale.handle('cc_locale', cfg, cc, LOG, [])
+ m_LOG.assert_called_with('Invalid locale_configfile %s, '
+ 'only supported value is '
+ '/etc/locale.conf',
+ locale_configfile)
+
+ contents = util.load_file(cc.distro.locale_gen_fn)
+ self.assertIn('%s UTF-8' % locale, contents)
+ m_subp.assert_called_with(['localectl',
+ 'set-locale',
+ locale], capture=False)
+
def test_set_locale_sles(self):
cfg = {
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 70453683..cb636f41 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -1365,10 +1365,11 @@ NETWORK_CONFIGS = {
},
'expected_sysconfig_rhel': {
'ifcfg-iface0': textwrap.dedent("""\
- BOOTPROTO=none
+ BOOTPROTO=dhcp
DEVICE=iface0
DHCPV6C=yes
IPV6INIT=yes
+ IPV6_AUTOCONF=no
IPV6_FORCE_ACCEPT_RA=yes
DEVICE=iface0
NM_CONTROLLED=no
@@ -2932,6 +2933,10 @@ iface eth1 inet dhcp
self.assertEqual(0, mock_settle.call_count)
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False)
+)
class TestRhelSysConfigRendering(CiTestCase):
with_logs = True
@@ -3619,6 +3624,10 @@ USERCTL=no
expected, self._render_and_read(network_config=v2data))
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False)
+)
class TestOpenSuseSysConfigRendering(CiTestCase):
with_logs = True
@@ -4819,6 +4828,9 @@ class TestEniRoundTrip(CiTestCase):
{'type': 'route', 'id': 6,
'metric': 1, 'destination': '10.0.200.0/16',
'gateway': '172.23.31.1'},
+ {'type': 'route', 'id': 7,
+ 'metric': 1, 'destination': '10.0.0.100/32',
+ 'gateway': '172.23.31.1'},
]
files = self._render_and_read(
@@ -4842,6 +4854,10 @@ class TestEniRoundTrip(CiTestCase):
'172.23.31.1 metric 1 || true'),
('pre-down route del -net 10.0.200.0/16 gw '
'172.23.31.1 metric 1 || true'),
+ ('post-up route add -host 10.0.0.100/32 gw '
+ '172.23.31.1 metric 1 || true'),
+ ('pre-down route del -host 10.0.0.100/32 gw '
+ '172.23.31.1 metric 1 || true'),
]
found = files['/etc/network/interfaces'].splitlines()
@@ -5029,6 +5045,10 @@ class TestNetRenderers(CiTestCase):
self.assertTrue(result)
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False)
+)
class TestGetInterfaces(CiTestCase):
_data = {'bonds': ['bond1'],
'bridges': ['bridge1'],
@@ -5178,6 +5198,10 @@ class TestInterfaceHasOwnMac(CiTestCase):
self.assertFalse(interface_has_own_mac("eth0"))
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False)
+)
class TestGetInterfacesByMac(CiTestCase):
_data = {'bonds': ['bond1'],
'bridges': ['bridge1'],
@@ -5334,6 +5358,10 @@ class TestInterfacesSorting(CiTestCase):
['enp0s3', 'enp0s8', 'enp0s13', 'enp1s2', 'enp2s0', 'enp2s3'])
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False)
+)
class TestGetIBHwaddrsByInterface(CiTestCase):
_ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56'
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 857629f1..e5292001 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -572,6 +572,10 @@ class TestMultiLog(helpers.FilesystemMockingTestCase):
util.multi_log(logged_string)
self.assertEqual(logged_string, self.stdout.getvalue())
+ def test_logs_dont_go_to_stdout_if_fallback_to_stdout_is_false(self):
+ util.multi_log('something', fallback_to_stdout=False)
+ self.assertEqual('', self.stdout.getvalue())
+
def test_logs_go_to_log_if_given(self):
log = mock.MagicMock()
logged_string = 'something very important'
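The new test covers a fallback_to_stdout keyword on util.multi_log: when writing to the console is not possible and the flag is False, the text is dropped rather than echoed to stdout. A usage sketch (the keyword is confirmed by the test; the other defaults are assumed unchanged):

    import logging
    from cloudinit import util

    log = logging.getLogger('example')
    # Deliver to the logger; do not fall back to stdout if /dev/console
    # is unavailable (e.g. inside a container).
    util.multi_log('quiet message\n', log=log, fallback_to_stdout=False)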
diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py
index 9c7d25fa..430cc69f 100644
--- a/tests/unittests/test_vmware_config_file.py
+++ b/tests/unittests/test_vmware_config_file.py
@@ -525,5 +525,21 @@ class TestVmwareNetConfig(CiTestCase):
'gateway': '10.20.87.253'}]}],
nc.generate())
+ def test_meta_data(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertIsNone(conf.meta_data_name)
+ cf._insertKey("CLOUDINIT|METADATA", "test-metadata")
+ conf = Config(cf)
+ self.assertEqual("test-metadata", conf.meta_data_name)
+
+ def test_user_data(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertIsNone(conf.user_data_name)
+ cf._insertKey("CLOUDINIT|USERDATA", "test-userdata")
+ conf = Config(cf)
+ self.assertEqual("test-userdata", conf.user_data_name)
+
# vi: ts=4 expandtab
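These tests assert new meta_data_name and user_data_name accessors keyed on the CLOUDINIT|METADATA and CLOUDINIT|USERDATA settings. A plausible shape for the properties (only the keys and the None default are confirmed by the tests; the attribute names are assumptions):

    class Config:
        CLOUDINIT_META_DATA = 'CLOUDINIT|METADATA'
        CLOUDINIT_USER_DATA = 'CLOUDINIT|USERDATA'

        @property
        def meta_data_name(self):
            return self._configFile.get(Config.CLOUDINIT_META_DATA, None)

        @property
        def user_data_name(self):
            return self._configFile.get(Config.CLOUDINIT_USER_DATA, None)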
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index 1e0c3ea4..5c57acac 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -1,33 +1,48 @@
ader1990
+ajmyyra
AlexBaranowski
Aman306
+andrewbogott
+antonyc
aswinrajamannar
beezly
bipinbachhao
BirknerAlex
candlerb
+cawamata
+dankenigsberg
dermotbradley
dhensby
eandersson
+eb3095
emmanuelthome
izzyleung
johnsonshi
+jordimassaguerpla
jqueuniet
jsf9k
+klausenbusk
landon912
lucasmoura
lungj
manuelisimo
marlluslustosa
matthewruffell
+mitechie
nishigori
+olivierlemasle
omBratteng
onitake
+qubidt
riedel
slyon
smoser
sshedi
TheRealFalcon
+taoyama
+tnt-dev
tomponline
tsanghan
WebSpider
+xiachen-rh
+xnox
diff --git a/tools/ds-identify b/tools/ds-identify
index 496dbb8a..2f2486f7 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -125,7 +125,7 @@ DI_DSNAME=""
# be searched if there is no setting found in config.
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \
CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \
-OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud"
+OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud"
DI_DSLIST=""
DI_MODE=""
DI_ON_FOUND=""
@@ -883,6 +883,11 @@ dscheck_RbxCloud() {
return ${DS_NOT_FOUND}
}
+dscheck_UpCloud() {
+ dmi_sys_vendor_is UpCloud && return ${DS_FOUND}
+ return ${DS_NOT_FOUND}
+}
+
ovf_vmware_guest_customization() {
# vmware guest customization
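dscheck_UpCloud matches on the DMI system vendor string, the same cheap probe the neighboring checks use. A hedged Python analogue of the new shell check (the actual DataSourceUpCloud code may detect the platform differently):

    from cloudinit import dmi

    def system_is_upcloud():
        # Mirrors `dmi_sys_vendor_is UpCloud` in ds-identify.
        return dmi.read_dmi_data('system-manufacturer') == 'UpCloud'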
diff --git a/tox.ini b/tox.ini
index 022b918d..3158ebd5 100644
--- a/tox.ini
+++ b/tox.ini
@@ -26,9 +26,9 @@ deps =
pylint==2.6.0
# test-requirements because unit tests are now present in cloudinit tree
-r{toxinidir}/test-requirements.txt
- -r{toxinidir}/cloud-tests-requirements.txt
-r{toxinidir}/integration-requirements.txt
-commands = {envpython} -m pylint {posargs:cloudinit tests tools}
+commands = {envpython} -m pylint {posargs:cloudinit tests --ignore=cloud_tests tools}
+
[testenv:py3]
basepython = python3
@@ -123,13 +123,12 @@ commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/}
deps = flake8
[testenv:tip-pylint]
-commands = {envpython} -m pylint {posargs:cloudinit tests tools}
+commands = {envpython} -m pylint {posargs:cloudinit tests --ignore=cloud_tests tools}
deps =
# requirements
pylint
# test-requirements
-r{toxinidir}/test-requirements.txt
- -r{toxinidir}/cloud-tests-requirements.txt
-r{toxinidir}/integration-requirements.txt
[testenv:citest]
@@ -148,13 +147,13 @@ deps =
[testenv:integration-tests]
basepython = python3
commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests}
-passenv = CLOUD_INIT_*
+passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_*
deps =
-r{toxinidir}/integration-requirements.txt
[testenv:integration-tests-ci]
commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests}
-passenv = CLOUD_INIT_*
+passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_*
deps =
-r{toxinidir}/integration-requirements.txt
setenv =
@@ -164,6 +163,8 @@ setenv =
# TODO: s/--strict/--strict-markers/ once xenial support is dropped
testpaths = cloudinit tests/unittests
addopts = --strict
+log_format = %(asctime)s %(levelname)-9s %(name)s:%(filename)s:%(lineno)d %(message)s
+log_date_format = %Y-%m-%d %H:%M:%S
markers =
allow_subp_for: allow subp usage for the given commands (disable_subp_usage)
allow_all_subp: allow all subp usage (disable_subp_usage)
@@ -173,9 +174,18 @@ markers =
gce: test will only run on GCE platform
azure: test will only run on Azure platform
oci: test will only run on OCI platform
+ openstack: test will only run on openstack
+ lxd_config_dict: set the config_dict passed on LXD instance creation
lxd_container: test will only run in LXD container
+ lxd_use_exec: `execute` will use `lxc exec` instead of SSH
lxd_vm: test will only run in LXD VM
+ not_xenial: test cannot run on the xenial release
+ not_bionic: test cannot run on the bionic release
no_container: test cannot run in a container
user_data: the user data to be passed to the test instance
instance_name: the name to be used for the test instance
sru_2020_11: test is part of the 2020/11 SRU verification
+ sru_2021_01: test is part of the 2021/01 SRU verification
+ sru_next: test is part of the next SRU verification
+ ubuntu: this test should run on Ubuntu
+ unstable: skip this test because it is flakey
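The new markers are consumed by the harness in tests/integration_tests. A hypothetical test showing how they compose (the `client` fixture and read_from_file helper follow that harness's conventions, but this example is illustrative, not verbatim):

    import pytest

    USER_DATA = """\
    #cloud-config
    bootcmd:
      - echo hello > /var/tmp/hello
    """

    @pytest.mark.sru_2021_01
    @pytest.mark.not_xenial
    @pytest.mark.user_data(USER_DATA)
    def test_bootcmd_ran(client):
        assert 'hello' in client.read_from_file('/var/tmp/hello')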