summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChad Smith <chad.smith@canonical.com>2020-01-15 09:36:22 -0700
committerChad Smith <chad.smith@canonical.com>2020-01-15 09:36:22 -0700
commit7c3f6c689bfc91461a486613d8f1e4a90b3b94c9 (patch)
tree3d2db08b35b6f9820b6567196d8160d3437df7e9
parente06831d0df930a5e2f5f999892aeb8442dd7ffbb (diff)
parentbb4131a2bd36d9e8932fdcb61432260f16159cde (diff)
downloadcloud-init-git-7c3f6c689bfc91461a486613d8f1e4a90b3b94c9.tar.gz
merge from origin/master at 19.4-33-gbb4131a2
-rw-r--r--.github/workflows/cla.yml29
-rw-r--r--.travis.yml12
-rw-r--r--ChangeLog76
-rw-r--r--HACKING.rst25
-rw-r--r--README.md48
-rw-r--r--SECURITY.md64
-rw-r--r--cloudinit/analyze/tests/test_boot.py8
-rw-r--r--cloudinit/apport.py4
-rw-r--r--cloudinit/cmd/tests/test_query.py28
-rw-r--r--cloudinit/config/cc_disk_setup.py4
-rw-r--r--cloudinit/config/cc_growpart.py26
-rw-r--r--cloudinit/config/cc_keys_to_console.py6
-rw-r--r--cloudinit/config/cc_salt_minion.py5
-rw-r--r--cloudinit/config/cc_scripts_per_instance.py3
-rw-r--r--cloudinit/config/cc_scripts_per_once.py5
-rw-r--r--cloudinit/config/cc_set_hostname.py10
-rwxr-xr-xcloudinit/config/cc_set_passwords.py6
-rw-r--r--cloudinit/config/cc_snap_config.py184
-rw-r--r--cloudinit/config/cc_snappy.py322
-rwxr-xr-xcloudinit/config/cc_ssh.py93
-rwxr-xr-xcloudinit/config/cc_ssh_authkey_fingerprints.py6
-rwxr-xr-xcloudinit/config/cc_ssh_import_id.py8
-rw-r--r--cloudinit/config/cc_users_groups.py6
-rw-r--r--cloudinit/config/tests/test_set_passwords.py4
-rw-r--r--cloudinit/config/tests/test_users_groups.py28
-rw-r--r--cloudinit/cs_utils.py2
-rwxr-xr-x[-rw-r--r--]cloudinit/distros/__init__.py8
-rw-r--r--cloudinit/distros/amazon.py26
-rw-r--r--cloudinit/distros/freebsd.py491
-rw-r--r--cloudinit/net/__init__.py66
-rw-r--r--cloudinit/net/dhcp.py42
-rw-r--r--cloudinit/net/eni.py4
-rw-r--r--cloudinit/net/freebsd.py175
-rw-r--r--cloudinit/net/netplan.py2
-rw-r--r--cloudinit/net/network_state.py4
-rw-r--r--cloudinit/net/renderers.py4
-rw-r--r--cloudinit/net/sysconfig.py4
-rw-r--r--cloudinit/net/tests/test_dhcp.py65
-rw-r--r--cloudinit/net/tests/test_network_state.py47
-rw-r--r--cloudinit/netinfo.py12
-rw-r--r--cloudinit/settings.py1
-rwxr-xr-xcloudinit/sources/DataSourceAzure.py22
-rw-r--r--cloudinit/sources/DataSourceIBMCloud.py2
-rw-r--r--cloudinit/sources/DataSourceRbxCloud.py1
-rwxr-xr-xcloudinit/sources/helpers/azure.py27
-rw-r--r--cloudinit/ssh_util.py80
-rw-r--r--cloudinit/tests/test_util.py21
-rw-r--r--cloudinit/util.py43
-rw-r--r--cloudinit/version.py2
-rw-r--r--config/cloud.cfg.d/README4
-rw-r--r--config/cloud.cfg.tmpl26
-rw-r--r--doc-requirements.txt5
-rw-r--r--doc/examples/cloud-config-add-apt-repos.txt6
-rw-r--r--doc/examples/cloud-config-ssh-keys.txt4
-rw-r--r--doc/examples/cloud-config-update-apt.txt2
-rw-r--r--doc/man/cloud-id.131
-rw-r--r--doc/man/cloud-init-per.145
-rw-r--r--doc/man/cloud-init.188
-rw-r--r--doc/rtd/conf.py1
-rw-r--r--doc/rtd/index.rst2
-rw-r--r--doc/rtd/topics/bugs.rst108
-rw-r--r--doc/rtd/topics/datasources.rst7
-rw-r--r--doc/rtd/topics/datasources/cloudstack.rst2
-rw-r--r--doc/rtd/topics/datasources/rbxcloud.rst2
-rw-r--r--doc/rtd/topics/examples.rst10
-rw-r--r--doc/rtd/topics/faq.rst205
-rw-r--r--doc/rtd/topics/format.rst10
-rw-r--r--doc/rtd/topics/instancedata.rst2
-rw-r--r--doc/rtd/topics/modules.rst5
-rw-r--r--doc/rtd/topics/network-config.rst2
-rw-r--r--doc/rtd/topics/security.rst5
-rw-r--r--doc/rtd/topics/tests.rst7
-rw-r--r--packages/debian/manpages3
-rwxr-xr-xsetup.py13
-rwxr-xr-xsysvinit/freebsd/cloudinit2
-rw-r--r--tests/cloud_tests/testcases/modules/TODO.md7
-rw-r--r--tests/cloud_tests/testcases/modules/snappy.py17
-rw-r--r--tests/cloud_tests/testcases/modules/snappy.yaml18
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py2
-rw-r--r--tests/data/netinfo/freebsd-ifconfig-output52
-rw-r--r--tests/data/netinfo/freebsd-netdev-formatted-output23
-rw-r--r--tests/unittests/test_datasource/test_azure.py24
-rw-r--r--tests/unittests/test_datasource/test_azure_helper.py19
-rw-r--r--tests/unittests/test_datasource/test_common.py13
-rw-r--r--tests/unittests/test_distros/test_create_users.py2
-rw-r--r--tests/unittests/test_distros/test_netconfig.py258
-rw-r--r--tests/unittests/test_ds_identify.py24
-rw-r--r--tests/unittests/test_handler/test_handler_growpart.py29
-rw-r--r--tests/unittests/test_handler/test_handler_snappy.py601
-rw-r--r--tests/unittests/test_net_freebsd.py19
-rw-r--r--tests/unittests/test_sshutil.py83
-rw-r--r--tools/.lp-to-git-user15
-rwxr-xr-xtools/ds-identify57
-rwxr-xr-xtools/migrate-lp-user-to-github14
-rwxr-xr-xtools/render-cloudcfg4
-rw-r--r--tox.ini8
96 files changed, 2039 insertions, 2008 deletions
diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml
new file mode 100644
index 00000000..34e11c2d
--- /dev/null
+++ b/.github/workflows/cla.yml
@@ -0,0 +1,29 @@
+name: Verify Contributor License Agreement
+
+on: [pull_request]
+
+jobs:
+ cla-validate:
+
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v1
+ - run: |
+ echo "::set-env name=CLA_SIGNED::$(grep -q ': \"${{ github.actor }}\"' ./tools/.lp-to-git-user && echo CLA signed || echo CLA not signed)"
+ - name: Add CLA label
+ run: |
+ # POST a new label to this issue
+ curl --request POST \
+ --url https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.number }}/labels \
+ --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \
+ --header 'content-type: application/json' \
+ --data '{"labels": ["${{env.CLA_SIGNED}}"]}'
+ - name: Comment about CLA signing
+ if: env.CLA_SIGNED == 'CLA not signed'
+ run: |
+ # POST a comment directing submitter to sign the CLA
+ curl --request POST \
+ --url https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.number }}/comments \
+ --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \
+ --header 'content-type: application/json' \
+ --data '{"body": "Hello ${{ github.actor }},\n\nThank you for your contribution to cloud-init.\n\nIn order for us to merge this pull request, you need\nto have signed the Contributor License Agreement (CLA).\nPlease ensure that you have signed the CLA by following our\nhacking guide at:\n\nhttps://cloudinit.readthedocs.io/en/latest/topics/hacking.html\n\nThanks,\nYour friendly cloud-init upstream\n"}'
diff --git a/.travis.yml b/.travis.yml
index fba00571..15157b86 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -8,13 +8,15 @@ install:
- pip install tox
script:
- - 'if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then tox; fi'
+ - tox
matrix:
fast_finish: true
include:
- python: 3.6
- env: TOXENV=py3
+ env:
+ TOXENV=py3
+ NOSE_VERBOSE=2 # List all tests run by nose
- install:
- git fetch --unshallow
- sudo apt-get build-dep -y cloud-init
@@ -41,10 +43,10 @@ matrix:
- sudo -E su $USER -c 'sbuild --nolog --verbose --dist=xenial cloud-init_*.dsc'
# Ubuntu LTS: Integration
- sg lxd -c 'tox -e citest -- run --verbose --preserve-data --data-dir results --os-name xenial --test modules/apt_configure_sources_list.yaml --test modules/ntp_servers --test modules/set_password_list --test modules/user_groups --deb cloud-init_*_all.deb'
- - python: 2.7
- env: TOXENV=py27
- python: 3.4
- env: TOXENV=xenial
+ env:
+ TOXENV=xenial
+ NOSE_VERBOSE=2 # List all tests run by nose
# Travis doesn't support Python 3.4 on bionic, so use xenial
dist: xenial
- python: 3.6
diff --git a/ChangeLog b/ChangeLog
index 2b8edb45..0430267f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,79 @@
+19.4
+ - doc: specify _ over - in cloud config modules
+ [Joshua Powers] (LP: #1293254)
+ - tools: Detect python to use via env in migrate-lp-user-to-github
+ [Adam Dobrawy]
+ - Partially revert "fix unlocking method on FreeBSD" (#116)
+ - tests: mock uid when running as root (#113)
+ [Joshua Powers] (LP: #1856096)
+ - cloudinit/netinfo: remove unused getgateway (#111)
+ - docs: clear up apt config sections (#107) [Joshua Powers] (LP: #1832823)
+ - doc: add kernel command line option to user data (#105)
+ [Joshua Powers] (LP: #1846524)
+ - config/cloud.cfg.d: update README [Joshua Powers] (LP: #1855006)
+ - azure: avoid re-running cloud-init when instance-id is byte-swapped
+ (#84) [AOhassan]
+ - fix unlocking method on FreeBSD [Igor Galić] (LP: #1854594)
+ - debian: add reference to the manpages [Joshua Powers]
+ - ds_identify: if /sys is not available use dmidecode (#42)
+ [Igor Galić] (LP: #1852442)
+ - docs: add cloud-id manpage [Joshua Powers]
+ - docs: add cloud-init-per manpage [Joshua Powers]
+ - docs: add cloud-init manpage [Joshua Powers]
+ - docs: add additional details to per-instance/once [Joshua Powers]
+ - Update doc-requirements.txt [Joshua Powers]
+ - doc-requirements: add missing dep [Joshua Powers]
+ - dhcp: Support RedHat dhcp rfc3442 lease format for option 121 (#76)
+ [Eric Lafontaine] (LP: #1850642)
+ - network_state: handle empty v1 config (#45) (LP: #1852496)
+ - docs: Add document on how to report bugs [Joshua Powers]
+ - Add an Amazon distro in the redhat OS family [Frederick Lefebvre]
+ - removed a couple of "the"s [gaughen]
+ - docs: fix line length and remove highlighting [Joshua Powers]
+ - docs: Add security.md to readthedocs [Joshua Powers]
+ - Multiple file fix for AuthorizedKeysFile config (#60) [Eduardo Otubo]
+ - Revert "travis: only run CI on pull requests"
+ - doc: update links on README.md [Joshua Powers]
+ - doc: Updates to wording of README.md [Joshua Powers]
+ - Add security.md [Joshua Powers]
+ - setup.py: Amazon Linux sets libexec to /usr/libexec (#52)
+ [Frederick Lefebvre]
+ - Fix linting failure in test_url_helper (#83) [Eric Lafontaine]
+ - url_helper: read_file_or_url should pass headers param into readurl
+ (#66) (LP: #1854084)
+ - dmidecode: log result *after* stripping \n [Igor Galić]
+ - cloud_tests: add azure platform support to integration tests
+ [ahosmanmsft]
+ - set_passwords: support for FreeBSD (#46) [Igor Galić]
+ - tools: migrate-lp-user-to-github removes repo_dir if created (#35)
+ - Correct jumbled documentation for cc_set_hostname module (#64)
+ [do3meli] (LP: #1853543)
+ - FreeBSD: fix for get_linux_distro() and lru_cache (#59)
+ [Igor Galić] (LP: #1815030)
+ - ec2: Add support for AWS IMDS v2 (session-oriented) (#55)
+ - tests: Fix cloudsigma tests when no dmidecode data is present. (#57)
+ [Scott Moser]
+ - net: IPv6, accept_ra, slaac, stateless (#51)
+ [Harald] (LP: #1806014, #1808647)
+ - docs: Update the configdrive datasource links (#44)
+ [Joshua Powers] (LP: #1852461)
+ - distro: correctly set usr_lib_exec path for FreeBSD distro (#40)
+ [Igor Galić] (LP: #1852491)
+ - azure: support secondary ipv6 addresses (#33)
+ - Fix metadata check when local-hostname is null (#32)
+ [Mark Goddard] (LP: #1852100)
+ - switch default FreeBSD salt minion pkg from py27 to py36
+ [Dominic Schlegel]
+ - travis: only run CI on pull requests
+ - add data-server dns entry as new metadata server detection [Joshua Hügli]
+ - pycodestyle: remove unused local variable
+ - reporting: Using a uuid to enforce uniqueness on the KVP keys. [momousta]
+ - docs: touchups in rtd intro and README.md
+ - doc: update launchpad git refs to github
+ - github: drop pull-request template to prepare for migration
+ - tools: add migrate-lp-user-to-github script to link LP to github
+ - github: new basic project readme
+
19.3
- azure: support matching dhcp route-metrics for dual-stack ipv4 ipv6
(LP: #1850308)
diff --git a/HACKING.rst b/HACKING.rst
index 8c8e518f..4ebdac17 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -17,12 +17,13 @@ Do these things once
has signed. When signing the CLA and prompted for 'Project contact' or
'Canonical Project Manager' enter 'Josh Powers'.
- For existing contributors who've already signed the agreement, we can verify
- the link between your `Launchpad`_ account and your `GitHub`_ account by
- creating a branch with both your Launchpad and GitHub usernames into both
- Launchpad and GitHub cloud-init repositories. We've added a tool
- (tools/migrate-lp-user-to-github) to the cloud-init repository to handle this
- migration as automatically as possible.
+ For first-time signers, or for existing contributors who have already signed
+ the agreement in Launchpad, we need to verify the link between your
+ `Launchpad`_ account and your `GitHub`_ account. To enable us to do this, we
+ ask that you create a branch with both your Launchpad and GitHub usernames
+ against both the Launchpad and GitHub cloud-init repositories. We've added a
+ tool (``tools/migrate-lp-user-to-github``) to the cloud-init repository to
+ handle this migration as automatically as possible.
The cloud-init team will review the two merge proposals and verify
that the CLA has been signed for the Launchpad user and record the
@@ -125,3 +126,15 @@ have any questions.
.. _tox: https://tox.readthedocs.io/en/latest/
.. _Ubuntu Server: https://github.com/orgs/canonical/teams/ubuntu-server
+
+Design
+======
+
+This section captures design decisions that are helpful to know when
+hacking on cloud-init.
+
+Cloud Config Modules
+--------------------
+
+* Any new modules should use underscores in any new config options and not
+ hyphens (e.g. `new_option` and *not* `new-option`).
diff --git a/README.md b/README.md
index 9e47b286..d648e426 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,4 @@
-# Cloud-init official project upstream as of 11/2019
-This repository is also mirrored to https://launchpad.net/cloud-init
+# cloud-init
[![Build Status](https://travis-ci.org/canonical/cloud-init.svg?branch=master)](https://travis-ci.org/canonical/cloud-init) [![Read the Docs](https://readthedocs.org/projects/cloudinit/badge/?version=latest&style=flat)](https://cloudinit.readthedocs.org)
@@ -16,45 +15,42 @@ Cloud instances are initialized from a disk image and instance data:
Cloud-init will identify the cloud it is running on during boot, read any
provided metadata from the cloud and initialize the system accordingly. This
-may involve setting up the network and storage devices to configuring SSH
-access key and many other aspects of a system. Later on the cloud-init will
+may involve setting up network and storage devices to configuring SSH
+access key and many other aspects of a system. Later on cloud-init will
also parse and process any optional user or vendor data that was passed to the
instance.
-## Getting involved
-All contributions welcome! [Submit code and docs by following our hacking guide](https://cloudinit.readthedocs.io/en/latest/topics/hacking.html)
-
## Getting help
-Having trouble? We would like to help!
+If you need support, start with the [user documentation](https://cloudinit.readthedocs.io/en/latest/).
-- Ask a question in the [``#cloud-init`` IRC channel on Freenode](https://webchat.freenode.net/?channel=#cloud-init)
-- Join and ask questions on the [cloud-init mailing list](https://launchpad.net/~cloud-init)
-- Find a bug? [Report bugs on Launchpad](https://bugs.launchpad.net/cloud-init)
+If you need additional help consider reaching out with one of the following options:
-## Recent cloud-init upstream releases
-Upstream release version | Release date |
---- | --- |
-19.4 | planned (2019-12-XX) |
-[19.3](https://launchpad.net/cloud-init/+milestone/19.3) | 2019-11-05 |
-[19.2](https://launchpad.net/cloud-init/+milestone/19.2) | 2019-07-17 |
-[19.1](https://launchpad.net/cloud-init/+milestone/19.1) | 2019-05-10 |
+- Ask a question in the [``#cloud-init`` IRC channel on Freenode](https://webchat.freenode.net/?channel=#cloud-init)
+- Search the cloud-init [mailing list archive](https://lists.launchpad.net/cloud-init/)
+- Better yet, join the [cloud-init mailing list](https://launchpad.net/~cloud-init) and participate
+- Find a bug? [Report bugs on Launchpad](https://bugs.launchpad.net/cloud-init/+filebug)
+## Distribution and cloud support
-## Cloud-init distribution and cloud support
-Note: Each linux distribution and cloud tracks cloud-init upstream updates at
-a different pace. If your distribution or cloud doesn't contain a recent
-cloud-init, suggest or propose an upgrade with your distribution of choice.
+Below is a list of the many OSes and clouds that contain and ship with cloud-init. If your
+distribution or cloud is not listed or does not have a recent version of cloud-init, please
+get in contact with that distribution and send them our way!
| Supported OSes | Supported Public Clouds | Supported Private Clouds |
| --- | --- | --- |
| Ubuntu<br />SLES/openSUSE<br />RHEL/CentOS<br />Fedora<br />Gentoo Linux<br />Debian<br />ArchLinux<br />FreeBSD<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
+## To start developing cloud-init
+
+Checkout the [hacking](https://cloudinit.readthedocs.io/en/latest/topics/hacking.html)
+document that outlines the steps necessary to develop, test, and submit code.
-## Daily Package Builds
-We host daily [Ubuntu Daily PPAs](https://code.launchpad.net/~cloud-init-dev/+recipes) that build package for each Ubuntu series from tip of cloud-init.
+## Daily builds
-For CentOS 7/8 we publish to a couple of COPR build repos:
+Daily builds are useful if you want to try the latest upstream code for the latest
+features or to verify bug fixes.
- * [**cloud-init-dev**: daily builds from cloud-init tip](https://copr.fedorainfracloud.org/coprs/g/cloud-init/cloud-init-dev/)
+For Ubuntu, see the [Daily PPAs](https://code.launchpad.net/~cloud-init-dev/+archive/ubuntu/daily)
+For CentOS, see the [COPR build repos](https://copr.fedorainfracloud.org/coprs/g/cloud-init/cloud-init-dev/)
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 00000000..69360bb7
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,64 @@
+# Security Policy
+
+The following documents the upstream cloud-init security policy.
+
+## Reporting
+
+If a user finds a security issue, they are requested to file a [private
+security bug on Launchpad](https://bugs.launchpad.net/cloud-init/+filebug).
+To ensure the information stays private, change the "This bug contains
+information that is:" from "Public" to "Private Security" when filing.
+
+After the bug is received, the issue is triaged within 2 working days of
+being reported and a response is sent to the reporter.
+
+## cloud-init-security
+
+The cloud-init-security Launchpad team is a private, invite-only team used to
+discuss and coordinate security issues with the project.
+
+Any issues disclosed to the cloud-init-security mailing list are considered
+embargoed and should only be discussed with other members of the
+cloud-init-security mailing list before the coordinated release date, unless
+specific exception is granted by the administrators of the mailing list. This
+includes disclosure of any details related to the vulnerability or the
+presence of a vulnerability itself. Violation of this policy may result in
+removal from the list for the company or individual involved.
+
+## Evaluation
+
+If the reported bug is deemed a real security issue a CVE is assigned by
+the Canonical Security Team as CVE Numbering Authority (CNA).
+
+If it is deemed a regular, non-security, issue, the reporter will be asked to
+follow typical bug reporting procedures.
+
+In addition to the disclosure timeline, the core Canonical cloud-init team
+will enlist the expertise of the Ubuntu Security team for guidance on
+industry-standard disclosure practices as necessary.
+
+If an issue specifically involves another distro or cloud vendor, additional
+individuals will be informed of the issue to help in evaluation.
+
+## Disclosure
+
+Disclosure of security issues will be made with a public statement. Once the
+determined time for disclosure has arrived the following will occur:
+
+* A public bug is filed/made public with vulnerability details, CVE,
+ mitigations and where to obtain the fix
+* An email is sent to the [public cloud-init mailing list](https://lists.launchpad.net/cloud-init/)
+
+The disclosure timeframe is coordinated with the reporter and members of the
+cloud-init-security list. This depends on a number of factors:
+
+* The reporter might have their own disclosure timeline (e.g. Google Project
+ Zero and many others use a 90-days after initial report OR when a fix
+ becomes public)
+* It might take time to decide upon and develop an appropriate fix
+* A distro might want extra time to backport any possible fixes before
+ the fix becomes public
+* A cloud may need additional time to prepare to help customers or implement
+ a fix
+* The issue might be deemed low priority
+* May wish to align with an upcoming planned release
diff --git a/cloudinit/analyze/tests/test_boot.py b/cloudinit/analyze/tests/test_boot.py
index 706e2cc0..f4001c14 100644
--- a/cloudinit/analyze/tests/test_boot.py
+++ b/cloudinit/analyze/tests/test_boot.py
@@ -12,17 +12,17 @@ class TestDistroChecker(CiTestCase):
@mock.patch('cloudinit.util.system_info', return_value={'dist': ('', '',
''),
'system': ''})
- @mock.patch('platform.linux_distribution', return_value=('', '', ''))
+ @mock.patch('cloudinit.util.get_linux_distro', return_value=('', '', ''))
@mock.patch('cloudinit.util.is_FreeBSD', return_value=False)
- def test_blank_distro(self, m_sys_info, m_linux_distribution, m_free_bsd):
+ def test_blank_distro(self, m_sys_info, m_get_linux_distro, m_free_bsd):
self.assertEqual(err_code, dist_check_timestamp())
@mock.patch('cloudinit.util.system_info', return_value={'dist': ('', '',
'')})
- @mock.patch('platform.linux_distribution', return_value=('', '', ''))
+ @mock.patch('cloudinit.util.get_linux_distro', return_value=('', '', ''))
@mock.patch('cloudinit.util.is_FreeBSD', return_value=True)
def test_freebsd_gentoo_cant_find(self, m_sys_info,
- m_linux_distribution, m_is_FreeBSD):
+ m_get_linux_distro, m_is_FreeBSD):
self.assertEqual(err_code, dist_check_timestamp())
@mock.patch('cloudinit.util.subp', return_value=(0, 1))
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index c6797f12..1f2c2e7e 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -34,12 +34,14 @@ KNOWN_CLOUD_NAMES = [
'OpenStack',
'Oracle',
'OVF',
+ 'RbxCloud - (HyperOne, Rootbox, Rubikon)',
'OpenTelekomCloud',
'Scaleway',
'SmartOS',
'VMware',
'ZStack',
- 'Other']
+ 'Other'
+]
# Potentially clear text collected logs
CLOUDINIT_LOG = '/var/log/cloud-init.log'
diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py
index 28738b1e..c48605ad 100644
--- a/cloudinit/cmd/tests/test_query.py
+++ b/cloudinit/cmd/tests/test_query.py
@@ -150,7 +150,9 @@ class TestQuery(CiTestCase):
instance_data=self.instance_data, list_keys=False,
user_data='ud', vendor_data='vd', varname=None)
with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(0, query.handle_args('anyname', args))
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ self.assertEqual(0, query.handle_args('anyname', args))
self.assertEqual(
'{\n "my_var": "it worked",\n "userdata": "<%s> file:ud",\n'
' "vendordata": "<%s> file:vd"\n}\n' % (
@@ -165,7 +167,9 @@ class TestQuery(CiTestCase):
instance_data=self.instance_data, list_keys=False,
user_data='ud', vendor_data='vd', varname='my_var')
with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(0, query.handle_args('anyname', args))
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ self.assertEqual(0, query.handle_args('anyname', args))
self.assertEqual('it worked\n', m_stdout.getvalue())
def test_handle_args_returns_nested_varname(self):
@@ -177,7 +181,9 @@ class TestQuery(CiTestCase):
instance_data=self.instance_data, user_data='ud', vendor_data='vd',
list_keys=False, varname='v1.key_2')
with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(0, query.handle_args('anyname', args))
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ self.assertEqual(0, query.handle_args('anyname', args))
self.assertEqual('value-2\n', m_stdout.getvalue())
def test_handle_args_returns_standardized_vars_to_top_level_aliases(self):
@@ -206,7 +212,9 @@ class TestQuery(CiTestCase):
instance_data=self.instance_data, user_data='ud', vendor_data='vd',
list_keys=False, varname=None)
with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(0, query.handle_args('anyname', args))
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ self.assertEqual(0, query.handle_args('anyname', args))
self.assertEqual(expected, m_stdout.getvalue())
def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname(self):
@@ -221,7 +229,9 @@ class TestQuery(CiTestCase):
instance_data=self.instance_data, list_keys=True, user_data='ud',
vendor_data='vd', varname=None)
with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(0, query.handle_args('anyname', args))
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ self.assertEqual(0, query.handle_args('anyname', args))
self.assertEqual(expected, m_stdout.getvalue())
def test_handle_args_list_keys_sorts_nested_keys_when_varname(self):
@@ -236,7 +246,9 @@ class TestQuery(CiTestCase):
instance_data=self.instance_data, list_keys=True,
user_data='ud', vendor_data='vd', varname='v1')
with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(0, query.handle_args('anyname', args))
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ self.assertEqual(0, query.handle_args('anyname', args))
self.assertEqual(expected, m_stdout.getvalue())
def test_handle_args_list_keys_errors_when_varname_is_not_a_dict(self):
@@ -252,7 +264,9 @@ class TestQuery(CiTestCase):
vendor_data='vd', varname='top')
with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(1, query.handle_args('anyname', args))
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ self.assertEqual(1, query.handle_args('anyname', args))
self.assertEqual('', m_stdout.getvalue())
self.assertIn(expected_error, m_stderr.getvalue())
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 29e192e8..d8d0fcf1 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -982,7 +982,9 @@ def mkfs(fs_cfg):
# File systems that support the -F flag
if overwrite or device_type(device) == "disk":
- fs_cmd.append(lookup_force_flag(fs_type))
+ force_flag = lookup_force_flag(fs_type)
+ if force_flag:
+ fs_cmd.append(force_flag)
# Add the extends FS options
if fs_opts:
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index aa9716e7..1b512a06 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -22,11 +22,11 @@ mountpoint in the filesystem or a path to the block device in ``/dev``.
The utility to use for resizing can be selected using the ``mode`` config key.
If ``mode`` key is set to ``auto``, then any available utility (either
-``growpart`` or ``gpart``) will be used. If neither utility is available, no
-error will be raised. If ``mode`` is set to ``growpart``, then the ``growpart``
-utility will be used. If this utility is not available on the system, this will
-result in an error. If ``mode`` is set to ``off`` or ``false``, then
-``cc_growpart`` will take no action.
+``growpart`` or BSD ``gpart``) will be used. If neither utility is available,
+no error will be raised. If ``mode`` is set to ``growpart``, then the
+``growpart`` utility will be used. If this utility is not available on the
+system, this will result in an error. If ``mode`` is set to ``off`` or
+``false``, then ``cc_growpart`` will take no action.
There is some functionality overlap between this module and the ``growroot``
functionality of ``cloud-initramfs-tools``. However, there are some situations
@@ -132,7 +132,7 @@ class ResizeGrowPart(object):
try:
(out, _err) = util.subp(["growpart", "--help"], env=myenv)
- if re.search(r"--update\s+", out, re.DOTALL):
+ if re.search(r"--update\s+", out):
return True
except util.ProcessExecutionError:
@@ -161,9 +161,17 @@ class ResizeGrowPart(object):
class ResizeGpart(object):
def available(self):
- if not util.which('gpart'):
- return False
- return True
+ myenv = os.environ.copy()
+ myenv['LANG'] = 'C'
+
+ try:
+ (_out, err) = util.subp(["gpart", "help"], env=myenv, rcs=[0, 1])
+ if re.search(r"gpart recover ", err):
+ return True
+
+ except util.ProcessExecutionError:
+ pass
+ return False
def resize(self, diskdev, partnum, partdev):
"""
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index 8f8735ce..3d2ded3d 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -9,10 +9,10 @@
"""
Keys to Console
---------------
-**Summary:** control which ssh keys may be written to console
+**Summary:** control which SSH keys may be written to console
-For security reasons it may be desirable not to write ssh fingerprints and keys
-to the console. To avoid the fingerprint of types of ssh keys being written to
+For security reasons it may be desirable not to write SSH fingerprints and keys
+to the console. To avoid the fingerprint of types of SSH keys being written to
console the ``ssh_fp_console_blacklist`` config key can be used. By default all
types of keys will have their fingerprints written to console. To avoid keys
of a key type being written to console the ``ssh_key_console_blacklist`` config
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index 1c991d8d..5dd8de37 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -46,6 +46,8 @@ specify them with ``pkg_name``, ``service_name`` and ``config_dir``.
import os
from cloudinit import safeyaml, util
+from cloudinit.distros import rhel_util
+
# Note: see https://docs.saltstack.com/en/latest/topics/installation/
# Note: see https://docs.saltstack.com/en/latest/ref/configuration/
@@ -123,7 +125,8 @@ def handle(name, cfg, cloud, log, _args):
# we need to have the salt minion service enabled in rc in order to be
# able to start the service. this does only apply on FreeBSD servers.
if cloud.distro.osfamily == 'freebsd':
- cloud.distro.updatercconf('salt_minion_enable', 'YES')
+ rhel_util.update_sysconfig_file(
+ '/etc/rc.conf', {'salt_minion_enable': 'YES'})
# restart salt-minion. 'service' will start even if not started. if it
# was started, it needs to be restarted for config change.
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
index 2bc8a6ef..75549b52 100644
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ b/cloudinit/config/cc_scripts_per_instance.py
@@ -15,6 +15,9 @@ Any scripts in the ``scripts/per-instance`` directory on the datasource will
be run when a new instance is first booted. Scripts will be run in alphabetical
order. This module does not accept any config keys.
+Some cloud platforms change instance-id if a significant change was made to
+the system. As a result per-instance scripts will run again.
+
**Internal name:** ``cc_scripts_per_instance``
**Module frequency:** per instance
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
index 3f27ee34..259bdfab 100644
--- a/cloudinit/config/cc_scripts_per_once.py
+++ b/cloudinit/config/cc_scripts_per_once.py
@@ -12,8 +12,9 @@ Scripts Per Once
**Summary:** run one time scripts
Any scripts in the ``scripts/per-once`` directory on the datasource will be run
-only once. Scripts will be run in alphabetical order. This module does not
-accept any config keys.
+only once. Changes to the instance will not force a re-run. The only way to
+re-run these scripts is to run the clean subcommand and reboot. Scripts will
+be run in alphabetical order. This module does not accept any config keys.
**Internal name:** ``cc_scripts_per_once``
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index a0febc3c..10d6d197 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -21,9 +21,17 @@ key, and the fqdn of the cloud will be used. If a fqdn specified with the
the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set, ``fqdn``
will be used.
+This module will run in the init-local stage before networking is configured
+if the hostname is set by metadata or user data on the local system.
+
+This will occur on datasources like nocloud and ovf where metadata and user
+data are available locally. This ensures that the desired hostname is applied
+before any DHCP requests are performed on these platforms where dynamic DNS is
+based on initial hostname.
+
**Internal name:** ``cc_set_hostname``
-**Module frequency:** per instance
+**Module frequency:** always
**Supported distros:** all
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index c3c5b0ff..e3b39d8b 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -112,7 +112,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
elif util.is_false(pw_auth):
cfg_val = 'no'
else:
- bmsg = "Leaving ssh config '%s' unchanged." % cfg_name
+ bmsg = "Leaving SSH config '%s' unchanged." % cfg_name
if pw_auth is None or pw_auth.lower() == 'unchanged':
LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth)
else:
@@ -121,7 +121,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
updated = update_ssh_config({cfg_name: cfg_val})
if not updated:
- LOG.debug("No need to restart ssh service, %s not updated.", cfg_name)
+ LOG.debug("No need to restart SSH service, %s not updated.", cfg_name)
return
if 'systemctl' in service_cmd:
@@ -129,7 +129,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
else:
cmd = list(service_cmd) + [service_name, "restart"]
util.subp(cmd)
- LOG.debug("Restarted the ssh daemon.")
+ LOG.debug("Restarted the SSH daemon.")
def handle(_name, cfg, cloud, log, args):
diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py
deleted file mode 100644
index afe297ee..00000000
--- a/cloudinit/config/cc_snap_config.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# Copyright (C) 2016 Canonical Ltd.
-#
-# Author: Ryan Harper <ryan.harper@canonical.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-# RELEASE_BLOCKER: Remove this deprecated module in 18.3
-"""
-Snap Config
------------
-**Summary:** snap_config modules allows configuration of snapd.
-
-**Deprecated**: Use :ref:`snap` module instead. This module will not exist
-in cloud-init 18.3.
-
-This module uses the same ``snappy`` namespace for configuration but
-acts only only a subset of the configuration.
-
-If ``assertions`` is set and the user has included a list of assertions
-then cloud-init will collect the assertions into a single assertion file
-and invoke ``snap ack <path to file with assertions>`` which will attempt
-to load the provided assertions into the snapd assertion database.
-
-If ``email`` is set, this value is used to create an authorized user for
-contacting and installing snaps from the Ubuntu Store. This is done by
-calling ``snap create-user`` command.
-
-If ``known`` is set to True, then it is expected the user also included
-an assertion of type ``system-user``. When ``snap create-user`` is called
-cloud-init will append '--known' flag which instructs snapd to look for
-a system-user assertion with the details. If ``known`` is not set, then
-``snap create-user`` will contact the Ubuntu SSO for validating and importing
-a system-user for the instance.
-
-.. note::
- If the system is already managed, then cloud-init will not attempt to
- create a system-user.
-
-**Internal name:** ``cc_snap_config``
-
-**Module frequency:** per instance
-
-**Supported distros:** any with 'snapd' available
-
-**Config keys**::
-
- #cloud-config
- snappy:
- assertions:
- - |
- <assertion 1>
- - |
- <assertion 2>
- email: user@user.org
- known: true
-
-"""
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-SNAPPY_CMD = "snap"
-ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions"
-
-
-"""
-snappy:
- assertions:
- - |
- <snap assertion 1>
- - |
- <snap assertion 2>
- email: foo@foo.io
- known: true
-"""
-
-
-def add_assertions(assertions=None):
- """Import list of assertions.
-
- Import assertions by concatenating each assertion into a
- string separated by a '\n'. Write this string to a instance file and
- then invoke `snap ack /path/to/file` and check for errors.
- If snap exits 0, then all assertions are imported.
- """
- if not assertions:
- assertions = []
-
- if not isinstance(assertions, list):
- raise ValueError(
- 'assertion parameter was not a list: {assertions}'.format(
- assertions=assertions))
-
- snap_cmd = [SNAPPY_CMD, 'ack']
- combined = "\n".join(assertions)
- if len(combined) == 0:
- raise ValueError("Assertion list is empty")
-
- for asrt in assertions:
- LOG.debug('Acking: %s', asrt.split('\n')[0:2])
-
- util.write_file(ASSERTIONS_FILE, combined.encode('utf-8'))
- util.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
-
-
-def add_snap_user(cfg=None):
- """Add a snap system-user if provided with email under snappy config.
-
- - Check that system is not already managed.
- - Check that if using a system-user assertion, that it's
- imported into snapd.
-
- Returns a dictionary to be passed to Distro.create_user
- """
-
- if not cfg:
- cfg = {}
-
- if not isinstance(cfg, dict):
- raise ValueError(
- 'configuration parameter was not a dict: {cfg}'.format(cfg=cfg))
-
- snapuser = cfg.get('email', None)
- if not snapuser:
- return
-
- usercfg = {
- 'snapuser': snapuser,
- 'known': cfg.get('known', False),
- }
-
- # query if we're already registered
- out, _ = util.subp([SNAPPY_CMD, 'managed'], capture=True)
- if out.strip() == "true":
- LOG.warning('This device is already managed. '
- 'Skipping system-user creation')
- return
-
- if usercfg.get('known'):
- # Check that we imported a system-user assertion
- out, _ = util.subp([SNAPPY_CMD, 'known', 'system-user'],
- capture=True)
- if len(out) == 0:
- LOG.error('Missing "system-user" assertion. '
- 'Check "snappy" user-data assertions.')
- return
-
- return usercfg
-
-
-def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('snappy')
- if not cfgin:
- LOG.debug('No snappy config provided, skipping')
- return
-
- log.warning(
- 'DEPRECATION: snap_config module will be dropped in 18.3 release.'
- ' Use snap module instead')
- if not(util.system_is_snappy()):
- LOG.debug("%s: system not snappy", name)
- return
-
- assertions = cfgin.get('assertions', [])
- if len(assertions) > 0:
- LOG.debug('Importing user-provided snap assertions')
- add_assertions(assertions)
-
- # Create a snap user if requested.
- # Snap systems contact the store with a user's email
- # and extract information needed to create a local user.
- # A user may provide a 'system-user' assertion which includes
- # the required information. Using such an assertion to create
- # a local user requires specifying 'known: true' in the supplied
- # user-data.
- usercfg = add_snap_user(cfg=cfgin)
- if usercfg:
- cloud.distro.create_user(usercfg.get('snapuser'), **usercfg)
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
deleted file mode 100644
index b94cd04e..00000000
--- a/cloudinit/config/cc_snappy.py
+++ /dev/null
@@ -1,322 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-# RELEASE_BLOCKER: Remove this deprecated module in 18.3
-"""
-Snappy
-------
-**Summary:** snappy modules allows configuration of snappy.
-
-**Deprecated**: Use :ref:`snap` module instead. This module will not exist
-in cloud-init 18.3.
-
-The below example config config would install ``etcd``, and then install
-``pkg2.smoser`` with a ``<config-file>`` argument where ``config-file`` has
-``config-blob`` inside it. If ``pkgname`` is installed already, then
-``snappy config pkgname <file>``
-will be called where ``file`` has ``pkgname-config-blob`` as its content.
-
-Entries in ``config`` can be namespaced or non-namespaced for a package.
-In either case, the config provided to snappy command is non-namespaced.
-The package name is provided as it appears.
-
-If ``packages_dir`` has files in it that end in ``.snap``, then they are
-installed. Given 3 files:
-
- - <packages_dir>/foo.snap
- - <packages_dir>/foo.config
- - <packages_dir>/bar.snap
-
-cloud-init will invoke:
-
- - snappy install <packages_dir>/foo.snap <packages_dir>/foo.config
- - snappy install <packages_dir>/bar.snap
-
-.. note::
- that if provided a ``config`` entry for ``ubuntu-core``, then
- cloud-init will invoke: snappy config ubuntu-core <config>
- Allowing you to configure ubuntu-core in this way.
-
-The ``ssh_enabled`` key controls the system's ssh service. The default value
-is ``auto``. Options are:
-
- - **True:** enable ssh service
- - **False:** disable ssh service
- - **auto:** enable ssh service if either ssh keys have been provided
- or user has requested password authentication (ssh_pwauth).
-
-**Internal name:** ``cc_snappy``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu
-
-**Config keys**::
-
- #cloud-config
- snappy:
- system_snappy: auto
- ssh_enabled: auto
- packages: [etcd, pkg2.smoser]
- config:
- pkgname:
- key2: value2
- pkg2:
- key1: value1
- packages_dir: '/writable/user-data/cloud-init/snaps'
-"""
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import temp_utils
-from cloudinit import safeyaml
-from cloudinit import util
-
-import glob
-import os
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-SNAPPY_CMD = "snappy"
-NAMESPACE_DELIM = '.'
-
-BUILTIN_CFG = {
- 'packages': [],
- 'packages_dir': '/writable/user-data/cloud-init/snaps',
- 'ssh_enabled': "auto",
- 'system_snappy': "auto",
- 'config': {},
-}
-
-distros = ['ubuntu']
-
-
-def parse_filename(fname):
- fname = os.path.basename(fname)
- fname_noext = fname.rpartition(".")[0]
- name = fname_noext.partition("_")[0]
- shortname = name.partition(".")[0]
- return(name, shortname, fname_noext)
-
-
-def get_fs_package_ops(fspath):
- if not fspath:
- return []
- ops = []
- for snapfile in sorted(glob.glob(os.path.sep.join([fspath, '*.snap']))):
- (name, shortname, fname_noext) = parse_filename(snapfile)
- cfg = None
- for cand in (fname_noext, name, shortname):
- fpcand = os.path.sep.join([fspath, cand]) + ".config"
- if os.path.isfile(fpcand):
- cfg = fpcand
- break
- ops.append(makeop('install', name, config=None,
- path=snapfile, cfgfile=cfg))
- return ops
-
-
-def makeop(op, name, config=None, path=None, cfgfile=None):
- return({'op': op, 'name': name, 'config': config, 'path': path,
- 'cfgfile': cfgfile})
-
-
-def get_package_config(configs, name):
- # load the package's config from the configs dict.
- # prefer full-name entry (config-example.canonical)
- # over short name entry (config-example)
- if name in configs:
- return configs[name]
- return configs.get(name.partition(NAMESPACE_DELIM)[0])
-
-
-def get_package_ops(packages, configs, installed=None, fspath=None):
- # get the install an config operations that should be done
- if installed is None:
- installed = read_installed_packages()
- short_installed = [p.partition(NAMESPACE_DELIM)[0] for p in installed]
-
- if not packages:
- packages = []
- if not configs:
- configs = {}
-
- ops = []
- ops += get_fs_package_ops(fspath)
-
- for name in packages:
- ops.append(makeop('install', name, get_package_config(configs, name)))
-
- to_install = [f['name'] for f in ops]
- short_to_install = [f['name'].partition(NAMESPACE_DELIM)[0] for f in ops]
-
- for name in configs:
- if name in to_install:
- continue
- shortname = name.partition(NAMESPACE_DELIM)[0]
- if shortname in short_to_install:
- continue
- if name in installed or shortname in short_installed:
- ops.append(makeop('config', name,
- config=get_package_config(configs, name)))
-
- # prefer config entries to filepath entries
- for op in ops:
- if op['op'] != 'install' or not op['cfgfile']:
- continue
- name = op['name']
- fromcfg = get_package_config(configs, op['name'])
- if fromcfg:
- LOG.debug("preferring configs[%(name)s] over '%(cfgfile)s'", op)
- op['cfgfile'] = None
- op['config'] = fromcfg
-
- return ops
-
-
-def render_snap_op(op, name, path=None, cfgfile=None, config=None):
- if op not in ('install', 'config'):
- raise ValueError("cannot render op '%s'" % op)
-
- shortname = name.partition(NAMESPACE_DELIM)[0]
- try:
- cfg_tmpf = None
- if config is not None:
- # input to 'snappy config packagename' must have nested data. odd.
- # config:
- # packagename:
- # config
- # Note, however, we do not touch config files on disk.
- nested_cfg = {'config': {shortname: config}}
- (fd, cfg_tmpf) = temp_utils.mkstemp()
- os.write(fd, safeyaml.dumps(nested_cfg).encode())
- os.close(fd)
- cfgfile = cfg_tmpf
-
- cmd = [SNAPPY_CMD, op]
- if op == 'install':
- if path:
- cmd.append("--allow-unauthenticated")
- cmd.append(path)
- else:
- cmd.append(name)
- if cfgfile:
- cmd.append(cfgfile)
- elif op == 'config':
- cmd += [name, cfgfile]
-
- util.subp(cmd)
-
- finally:
- if cfg_tmpf:
- os.unlink(cfg_tmpf)
-
-
-def read_installed_packages():
- ret = []
- for (name, _date, _version, dev) in read_pkg_data():
- if dev:
- ret.append(NAMESPACE_DELIM.join([name, dev]))
- else:
- ret.append(name)
- return ret
-
-
-def read_pkg_data():
- out, _err = util.subp([SNAPPY_CMD, "list"])
- pkg_data = []
- for line in out.splitlines()[1:]:
- toks = line.split(sep=None, maxsplit=3)
- if len(toks) == 3:
- (name, date, version) = toks
- dev = None
- else:
- (name, date, version, dev) = toks
- pkg_data.append((name, date, version, dev,))
- return pkg_data
-
-
-def disable_enable_ssh(enabled):
- LOG.debug("setting enablement of ssh to: %s", enabled)
- # do something here that would enable or disable
- not_to_be_run = "/etc/ssh/sshd_not_to_be_run"
- if enabled:
- util.del_file(not_to_be_run)
- # this is an indempotent operation
- util.subp(["systemctl", "start", "ssh"])
- else:
- # this is an indempotent operation
- util.subp(["systemctl", "stop", "ssh"])
- util.write_file(not_to_be_run, "cloud-init\n")
-
-
-def set_snappy_command():
- global SNAPPY_CMD
- if util.which("snappy-go"):
- SNAPPY_CMD = "snappy-go"
- elif util.which("snappy"):
- SNAPPY_CMD = "snappy"
- else:
- SNAPPY_CMD = "snap"
- LOG.debug("snappy command is '%s'", SNAPPY_CMD)
-
-
-def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('snappy')
- if not cfgin:
- cfgin = {}
- mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
-
- sys_snappy = str(mycfg.get("system_snappy", "auto"))
- if util.is_false(sys_snappy):
- LOG.debug("%s: System is not snappy. disabling", name)
- return
-
- if sys_snappy.lower() == "auto" and not(util.system_is_snappy()):
- LOG.debug("%s: 'auto' mode, and system not snappy", name)
- return
-
- log.warning(
- 'DEPRECATION: snappy module will be dropped in 18.3 release.'
- ' Use snap module instead')
-
- set_snappy_command()
-
- pkg_ops = get_package_ops(packages=mycfg['packages'],
- configs=mycfg['config'],
- fspath=mycfg['packages_dir'])
-
- fails = []
- for pkg_op in pkg_ops:
- try:
- render_snap_op(**pkg_op)
- except Exception as e:
- fails.append((pkg_op, e,))
- LOG.warning("'%s' failed for '%s': %s",
- pkg_op['op'], pkg_op['name'], e)
-
- # Default to disabling SSH
- ssh_enabled = mycfg.get('ssh_enabled', "auto")
-
- # If the user has not explicitly enabled or disabled SSH, then enable it
- # when password SSH authentication is requested or there are SSH keys
- if ssh_enabled == "auto":
- user_ssh_keys = cloud.get_public_ssh_keys() or None
- password_auth_enabled = cfg.get('ssh_pwauth', False)
- if user_ssh_keys:
- LOG.debug("Enabling SSH, ssh keys found in datasource")
- ssh_enabled = True
- elif cfg.get('ssh_authorized_keys'):
- LOG.debug("Enabling SSH, ssh keys found in config")
- elif password_auth_enabled:
- LOG.debug("Enabling SSH, password authentication requested")
- ssh_enabled = True
- elif ssh_enabled not in (True, False):
- LOG.warning("Unknown value '%s' in ssh_enabled", ssh_enabled)
-
- disable_enable_ssh(ssh_enabled)
-
- if fails:
- raise Exception("failed to install/configure snaps")
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 050285a8..163cce99 100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -9,43 +9,23 @@
"""
SSH
---
-**Summary:** configure ssh and ssh keys
+**Summary:** configure SSH and SSH keys (host and authorized)
-This module handles most configuration for ssh and ssh keys. Many images have
-default ssh keys, which can be removed using ``ssh_deletekeys``. Since removing
-default keys is usually the desired behavior this option is enabled by default.
+This module handles most configuration for SSH and both host and authorized SSH
+keys.
-Keys can be added using the ``ssh_keys`` configuration key. The argument to
-this config key should be a dictionary entries for the public and private keys
-of each desired key type. Entries in the ``ssh_keys`` config dict should
-have keys in the format ``<key type>_private`` and ``<key type>_public``, e.g.
-``rsa_private: <key>`` and ``rsa_public: <key>``. See below for supported key
-types. Not all key types have to be specified, ones left unspecified will not
-be used. If this config option is used, then no keys will be generated.
+Authorized Keys
+^^^^^^^^^^^^^^^
-.. note::
- when specifying private keys in cloud-config, care should be taken to
- ensure that the communication between the data source and the instance is
- secure
+Authorized keys are a list of public SSH keys that are allowed to connect to
+a user account on a system. They are stored in `.ssh/authorized_keys` in that
+account's home directory. Authorized keys for the default user defined in
+``users`` can be specified using ``ssh_authorized_keys``. Keys
+should be specified as a list of public keys.
.. note::
- to specify multiline private keys, use yaml multiline syntax
-
-If no keys are specified using ``ssh_keys``, then keys will be generated using
-``ssh-keygen``. By default one public/private pair of each supported key type
-will be generated. The key types to generate can be specified using the
-``ssh_genkeytypes`` config flag, which accepts a list of key types to use. For
-each key type for which this module has been instructed to create a keypair, if
-a key of the same type is already present on the system (i.e. if
-``ssh_deletekeys`` was false), no key will be generated.
-
-Supported key types for the ``ssh_keys`` and the ``ssh_genkeytypes`` config
-flags are:
-
- - rsa
- - dsa
- - ecdsa
- - ed25519
+ see the ``cc_set_passwords`` module documentation to enable/disable SSH
+ password authentication
Root login can be enabled/disabled using the ``disable_root`` config key. Root
login options can be manually specified with ``disable_root_opts``. If
@@ -55,17 +35,46 @@ root login is disabled, and root login opts are set to::
no-port-forwarding,no-agent-forwarding,no-X11-forwarding
-Authorized keys for the default user/first user defined in ``users`` can be
-specified using ``ssh_authorized_keys``. Keys should be specified as a list of
-public keys.
+Host Keys
+^^^^^^^^^
+
+Host keys are for authenticating a specific instance. Many images have default
+host SSH keys, which can be removed using ``ssh_deletekeys``. This prevents
+re-use of a private host key from an image on multiple machines. Since
+removing default host keys is usually the desired behavior this option is
+enabled by default.
-Importing ssh public keys for the default user (defined in ``users``)) is
-enabled by default. This feature may be disabled by setting
-``allow_publish_ssh_keys: false``.
+Host keys can be added using the ``ssh_keys`` configuration key. The argument
+to this config key should be a dictionary of entries for the public and private
+keys of each desired key type. Entries in the ``ssh_keys`` config dict should
+have keys in the format ``<key type>_private`` and ``<key type>_public``,
+e.g. ``rsa_private: <key>`` and ``rsa_public: <key>``. See below for supported
+key types. Not all key types have to be specified, ones left unspecified will
+not be used. If this config option is used, then no keys will be generated.
.. note::
- see the ``cc_set_passwords`` module documentation to enable/disable ssh
- password authentication
+ when specifying private host keys in cloud-config, care should be taken to
+ ensure that the communication between the data source and the instance is
+ secure
+
+.. note::
+ to specify multiline private host keys, use yaml multiline syntax
+
+If no host keys are specified using ``ssh_keys``, then keys will be generated
+using ``ssh-keygen``. By default one public/private pair of each supported
+host key type will be generated. The key types to generate can be specified
+using the ``ssh_genkeytypes`` config flag, which accepts a list of host key
+types to use. For each host key type for which this module has been instructed
+to create a keypair, if a key of the same type is already present on the
+system (i.e. if ``ssh_deletekeys`` was false), no key will be generated.
+
+Supported host key types for the ``ssh_keys`` and the ``ssh_genkeytypes``
+config flags are:
+
+ - rsa
+ - dsa
+ - ecdsa
+ - ed25519
**Internal name:** ``cc_ssh``
@@ -216,7 +225,7 @@ def handle(_name, cfg, cloud, log, _args):
if util.get_cfg_option_bool(cfg, 'allow_public_ssh_keys', True):
keys = cloud.get_public_ssh_keys() or []
else:
- log.debug('Skipping import of publish ssh keys per '
+ log.debug('Skipping import of publish SSH keys per '
'config setting: allow_public_ssh_keys=False')
if "ssh_authorized_keys" in cfg:
@@ -225,7 +234,7 @@ def handle(_name, cfg, cloud, log, _args):
apply_credentials(keys, user, disable_root, disable_root_opts)
except Exception:
- util.logexc(log, "Applying ssh credentials failed!")
+ util.logexc(log, "Applying SSH credentials failed!")
def apply_credentials(keys, user, disable_root, disable_root_opts):
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 98b0e665..dcf86fdc 100755
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -7,7 +7,7 @@
"""
SSH Authkey Fingerprints
------------------------
-**Summary:** log fingerprints of user ssh keys
+**Summary:** log fingerprints of user SSH keys
Write fingerprints of authorized keys for each user to log. This is enabled by
default, but can be disabled using ``no_ssh_fingerprints``. The hash type for
@@ -68,7 +68,7 @@ def _is_printable_key(entry):
def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
prefix='ci-info: '):
if not key_entries:
- message = ("%sno authorized ssh keys fingerprints found for user %s.\n"
+ message = ("%sno authorized SSH keys fingerprints found for user %s.\n"
% (prefix, user))
util.multi_log(message)
return
@@ -98,7 +98,7 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
def handle(name, cfg, cloud, log, _args):
if util.is_true(cfg.get('no_ssh_fingerprints', False)):
log.debug(("Skipping module named %s, "
- "logging of ssh fingerprints disabled"), name)
+ "logging of SSH fingerprints disabled"), name)
return
hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 6b46dafe..63f87298 100755
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -9,9 +9,9 @@
"""
SSH Import Id
-------------
-**Summary:** import ssh id
+**Summary:** import SSH id
-This module imports ssh keys from either a public keyserver, usually launchpad
+This module imports SSH keys from either a public keyserver, usually launchpad
or github using ``ssh-import-id``. Keys are referenced by the username they are
associated with on the keyserver. The keyserver can be specified by prepending
either ``lp:`` for launchpad or ``gh:`` for github to the username.
@@ -98,12 +98,12 @@ def import_ssh_ids(ids, user, log):
raise exc
cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
- log.debug("Importing ssh ids for user %s.", user)
+ log.debug("Importing SSH ids for user %s.", user)
try:
util.subp(cmd, capture=False)
except util.ProcessExecutionError as exc:
- util.logexc(log, "Failed to run command to import %s ssh ids", user)
+ util.logexc(log, "Failed to run command to import %s SSH ids", user)
raise exc
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index c32a743a..13764e60 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -51,14 +51,14 @@ config keys for an entry in ``users`` are as follows:
a Snappy user through ``snap create-user``. If an Ubuntu SSO account is
associated with the address, username and SSH keys will be requested from
there. Default: none
- - ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's
+ - ``ssh_authorized_keys``: Optional. List of SSH keys to add to user's
authkeys file. Default: none. This key can not be combined with
``ssh_redirect_user``.
- ``ssh_import_id``: Optional. SSH id to import for user. Default: none.
This key can not be combined with ``ssh_redirect_user``.
- ``ssh_redirect_user``: Optional. Boolean set to true to disable SSH
- logins for this user. When specified, all cloud meta-data public ssh
- keys will be set up in a disabled state for this username. Any ssh login
+ logins for this user. When specified, all cloud meta-data public SSH
+ keys will be set up in a disabled state for this username. Any SSH login
as this username will timeout and prompt with a message to login instead
as the configured <default_username> for this instance. Default: false.
This key can not be combined with ``ssh_import_id`` or
diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py
index 639fb9ea..85e2f1fe 100644
--- a/cloudinit/config/tests/test_set_passwords.py
+++ b/cloudinit/config/tests/test_set_passwords.py
@@ -45,7 +45,7 @@ class TestHandleSshPwauth(CiTestCase):
"""If config is not updated, then no system restart should be done."""
setpass.handle_ssh_pwauth(True)
m_subp.assert_not_called()
- self.assertIn("No need to restart ssh", self.logs.getvalue())
+ self.assertIn("No need to restart SSH", self.logs.getvalue())
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
@mock.patch(MODPATH + "util.subp")
@@ -80,7 +80,7 @@ class TestSetPasswordsHandle(CiTestCase):
setpass.handle(
'IGNORED', cfg={}, cloud=cloud, log=self.logger, args=[])
self.assertEqual(
- "DEBUG: Leaving ssh config 'PasswordAuthentication' unchanged. "
+ "DEBUG: Leaving SSH config 'PasswordAuthentication' unchanged. "
'ssh_pwauth=None\n',
self.logs.getvalue())
diff --git a/cloudinit/config/tests/test_users_groups.py b/cloudinit/config/tests/test_users_groups.py
index ba0afae3..f620b597 100644
--- a/cloudinit/config/tests/test_users_groups.py
+++ b/cloudinit/config/tests/test_users_groups.py
@@ -46,6 +46,34 @@ class TestHandleUsersGroups(CiTestCase):
mock.call('me2', default=False)])
m_group.assert_not_called()
+ @mock.patch('cloudinit.distros.freebsd.Distro.create_group')
+ @mock.patch('cloudinit.distros.freebsd.Distro.create_user')
+ def test_handle_users_in_cfg_calls_create_users_on_bsd(
+ self,
+ m_fbsd_user,
+ m_fbsd_group,
+ m_linux_user,
+ m_linux_group,
+ ):
+ """When users in config, create users with freebsd.create_user."""
+ cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config
+ # System config defines a default user for the distro.
+ sys_cfg = {'default_user': {'name': 'freebsd', 'lock_passwd': True,
+ 'groups': ['wheel'],
+ 'shell': '/bin/tcsh'}}
+ metadata = {}
+ cloud = self.tmp_cloud(
+ distro='freebsd', sys_cfg=sys_cfg, metadata=metadata)
+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
+ self.assertItemsEqual(
+ m_fbsd_user.call_args_list,
+ [mock.call('freebsd', groups='wheel', lock_passwd=True,
+ shell='/bin/tcsh'),
+ mock.call('me2', default=False)])
+ m_fbsd_group.assert_not_called()
+ m_linux_group.assert_not_called()
+ m_linux_user.assert_not_called()
+
def test_users_with_ssh_redirect_user_passes_keys(self, m_user, m_group):
"""When ssh_redirect_user is True pass default user and cloud keys."""
cfg = {
diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py
index 51c09582..8bac9c44 100644
--- a/cloudinit/cs_utils.py
+++ b/cloudinit/cs_utils.py
@@ -14,7 +14,7 @@ Having the server definition accessible by the VM can be useful in various
ways. For example it is possible to easily determine from within the VM,
which network interfaces are connected to public and which to private network.
Another use is to pass some data to initial VM setup scripts, like setting the
-hostname to the VM name or passing ssh public keys through server meta.
+hostname to the VM name or passing SSH public keys through server meta.
For more information take a look at the Server Context section of CloudSigma
API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 2ec79577..cdce26f2 100644..100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -36,7 +36,7 @@ ALL_DISTROS = 'all'
OSFAMILIES = {
'debian': ['debian', 'ubuntu'],
- 'redhat': ['centos', 'fedora', 'rhel'],
+ 'redhat': ['amazon', 'centos', 'fedora', 'rhel'],
'gentoo': ['gentoo'],
'freebsd': ['freebsd'],
'suse': ['opensuse', 'sles'],
@@ -145,7 +145,7 @@ class Distro(object):
# Write it out
# pylint: disable=assignment-from-no-return
- # We have implementations in arch, freebsd and gentoo still
+ # We have implementations in arch and gentoo still
dev_names = self._write_network(settings)
# pylint: enable=assignment-from-no-return
# Now try to bring them up
@@ -385,7 +385,7 @@ class Distro(object):
Add a user to the system using standard GNU tools
"""
# XXX need to make add_user idempotent somehow as we
- # still want to add groups or modify ssh keys on pre-existing
+ # still want to add groups or modify SSH keys on pre-existing
# users in the image.
if util.is_user(name):
LOG.info("User %s already exists, skipping.", name)
@@ -561,7 +561,7 @@ class Distro(object):
cloud_keys = kwargs.get('cloud_public_ssh_keys', [])
if not cloud_keys:
LOG.warning(
- 'Unable to disable ssh logins for %s given'
+ 'Unable to disable SSH logins for %s given'
' ssh_redirect_user: %s. No cloud public-keys present.',
name, kwargs['ssh_redirect_user'])
else:
diff --git a/cloudinit/distros/amazon.py b/cloudinit/distros/amazon.py
new file mode 100644
index 00000000..ff9a549f
--- /dev/null
+++ b/cloudinit/distros/amazon.py
@@ -0,0 +1,26 @@
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+# Copyright (C) 2014 Amazon.com, Inc. or its affiliates.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+# Author: Andrew Jorgensen <ajorgens@amazon.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import rhel
+
+from cloudinit import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(rhel.Distro):
+
+ def update_package_sources(self):
+ return None
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 8e5ae96c..40e435e7 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -13,12 +13,10 @@ import re
from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
+from cloudinit import net
from cloudinit import ssh_util
from cloudinit import util
-
-from cloudinit.distros import net_util
-from cloudinit.distros.parsers.resolv_conf import ResolvConf
-
+from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -29,9 +27,8 @@ class Distro(distros.Distro):
rc_conf_fn = "/etc/rc.conf"
login_conf_fn = '/etc/login.conf'
login_conf_fn_bak = '/etc/login.conf.orig'
- resolv_conf_fn = '/etc/resolv.conf'
ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users'
- default_primary_nic = 'hn0'
+ hostname_conf_fn = '/etc/rc.conf'
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -40,99 +37,8 @@ class Distro(distros.Distro):
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
self.osfamily = 'freebsd'
- self.ipv4_pat = re.compile(r"\s+inet\s+\d+[.]\d+[.]\d+[.]\d+")
cfg['ssh_svcname'] = 'sshd'
- # Updates a key in /etc/rc.conf.
- def updatercconf(self, key, value):
- LOG.debug("Checking %s for: %s = %s", self.rc_conf_fn, key, value)
- conf = self.loadrcconf()
- config_changed = False
- if key not in conf:
- LOG.debug("Adding key in %s: %s = %s", self.rc_conf_fn, key,
- value)
- conf[key] = value
- config_changed = True
- else:
- for item in conf.keys():
- if item == key and conf[item] != value:
- conf[item] = value
- LOG.debug("Changing key in %s: %s = %s", self.rc_conf_fn,
- key, value)
- config_changed = True
-
- if config_changed:
- LOG.info("Writing %s", self.rc_conf_fn)
- buf = StringIO()
- for keyval in conf.items():
- buf.write('%s="%s"\n' % keyval)
- util.write_file(self.rc_conf_fn, buf.getvalue())
-
- # Load the contents of /etc/rc.conf and store all keys in a dict. Make sure
- # quotes are ignored:
- # hostname="bla"
- def loadrcconf(self):
- RE_MATCH = re.compile(r'^(\w+)\s*=\s*(.*)\s*')
- conf = {}
- lines = util.load_file(self.rc_conf_fn).splitlines()
- for line in lines:
- m = RE_MATCH.match(line)
- if not m:
- LOG.debug("Skipping line from /etc/rc.conf: %s", line)
- continue
- key = m.group(1).rstrip()
- val = m.group(2).rstrip()
- # Kill them quotes (not completely correct, aka won't handle
- # quoted values, but should be ok ...)
- if val[0] in ('"', "'"):
- val = val[1:]
- if val[-1] in ('"', "'"):
- val = val[0:-1]
- if len(val) == 0:
- LOG.debug("Skipping empty value from /etc/rc.conf: %s", line)
- continue
- conf[key] = val
- return conf
-
- def readrcconf(self, key):
- conf = self.loadrcconf()
- try:
- val = conf[key]
- except KeyError:
- val = None
- return val
-
- # NOVA will inject something like eth0, rewrite that to use the FreeBSD
- # adapter. Since this adapter is based on the used driver, we need to
- # figure out which interfaces are available. On KVM platforms this is
- # vtnet0, where Xen would use xn0.
- def getnetifname(self, dev):
- LOG.debug("Translating network interface %s", dev)
- if dev.startswith('lo'):
- return dev
-
- n = re.search(r'\d+$', dev)
- index = n.group(0)
-
- (out, _err) = util.subp(['ifconfig', '-a'])
- ifconfigoutput = [x for x in (out.strip()).splitlines()
- if len(x.split()) > 0]
- bsddev = 'NOT_FOUND'
- for line in ifconfigoutput:
- m = re.match(r'^\w+', line)
- if m:
- if m.group(0).startswith('lo'):
- continue
- # Just settle with the first non-lo adapter we find, since it's
- # rather unlikely there will be multiple nicdrivers involved.
- bsddev = m.group(0)
- break
-
- # Replace the index with the one we're after.
- bsddev = re.sub(r'\d+$', index, bsddev)
- LOG.debug("Using network interface %s", bsddev)
- return bsddev
-
def _select_hostname(self, hostname, fqdn):
# Should be FQDN if available. See rc.conf(5) in FreeBSD
if fqdn:
@@ -140,46 +46,44 @@ class Distro(distros.Distro):
return hostname
def _read_system_hostname(self):
- sys_hostname = self._read_hostname(filename=None)
- return ('rc.conf', sys_hostname)
+ sys_hostname = self._read_hostname(self.hostname_conf_fn)
+ return (self.hostname_conf_fn, sys_hostname)
def _read_hostname(self, filename, default=None):
- hostname = None
- try:
- hostname = self.readrcconf('hostname')
- except IOError:
- pass
- if not hostname:
+ (_exists, contents) = rhel_util.read_sysconfig_file(filename)
+ if contents.get('hostname'):
+ return contents['hostname']
+ else:
return default
- return hostname
def _write_hostname(self, hostname, filename):
- self.updatercconf('hostname', hostname)
+ rhel_util.update_sysconfig_file(filename, {'hostname': hostname})
def create_group(self, name, members):
- group_add_cmd = ['pw', '-n', name]
+ group_add_cmd = ['pw', 'group', 'add', name]
if util.is_group(name):
LOG.warning("Skipping creation of existing group '%s'", name)
else:
try:
util.subp(group_add_cmd)
LOG.info("Created new group %s", name)
- except Exception as e:
+ except Exception:
util.logexc(LOG, "Failed to create group %s", name)
- raise e
-
- if len(members) > 0:
- for member in members:
- if not util.is_user(member):
- LOG.warning("Unable to add group member '%s' to group '%s'"
- "; user does not exist.", member, name)
- continue
- try:
- util.subp(['pw', 'usermod', '-n', name, '-G', member])
- LOG.info("Added user '%s' to group '%s'", member, name)
- except Exception:
- util.logexc(LOG, "Failed to add user '%s' to group '%s'",
- member, name)
+ raise
+ if not members:
+ members = []
+
+ for member in members:
+ if not util.is_user(member):
+ LOG.warning("Unable to add group member '%s' to group '%s'"
+ "; user does not exist.", member, name)
+ continue
+ try:
+ util.subp(['pw', 'usermod', '-n', name, '-G', member])
+ LOG.info("Added user '%s' to group '%s'", member, name)
+ except Exception:
+ util.logexc(LOG, "Failed to add user '%s' to group '%s'",
+ member, name)
def add_user(self, name, **kwargs):
if util.is_user(name):
@@ -225,9 +129,9 @@ class Distro(distros.Distro):
LOG.info("Adding user %s", name)
try:
util.subp(pw_useradd_cmd, logstring=log_pw_useradd_cmd)
- except Exception as e:
+ except Exception:
util.logexc(LOG, "Failed to create user %s", name)
- raise e
+ raise
# Set the password if it is provided
# For security consideration, only hashed passwd is assumed
passwd_val = kwargs.get('passwd', None)
@@ -237,9 +141,9 @@ class Distro(distros.Distro):
def expire_passwd(self, user):
try:
util.subp(['pw', 'usermod', user, '-p', '01-Jan-1970'])
- except Exception as e:
+ except Exception:
util.logexc(LOG, "Failed to set pw expiration for %s", user)
- raise e
+ raise
def set_passwd(self, user, passwd, hashed=False):
if hashed:
@@ -250,16 +154,16 @@ class Distro(distros.Distro):
try:
util.subp(['pw', 'usermod', user, hash_opt, '0'],
data=passwd, logstring="chpasswd for %s" % user)
- except Exception as e:
+ except Exception:
util.logexc(LOG, "Failed to set password for %s", user)
- raise e
+ raise
def lock_passwd(self, name):
try:
util.subp(['pw', 'usermod', name, '-h', '-'])
- except Exception as e:
+ except Exception:
util.logexc(LOG, "Failed to lock user %s", name)
- raise e
+ raise
def create_user(self, name, **kwargs):
self.add_user(name, **kwargs)
@@ -282,309 +186,16 @@ class Distro(distros.Distro):
keys = set(kwargs['ssh_authorized_keys']) or []
ssh_util.setup_user_keys(keys, name, options=None)
- @staticmethod
- def get_ifconfig_list():
- cmd = ['ifconfig', '-l']
- (nics, err) = util.subp(cmd, rcs=[0, 1])
- if len(err):
- LOG.warning("Error running %s: %s", cmd, err)
- return None
- return nics
-
- @staticmethod
- def get_ifconfig_ifname_out(ifname):
- cmd = ['ifconfig', ifname]
- (if_result, err) = util.subp(cmd, rcs=[0, 1])
- if len(err):
- LOG.warning("Error running %s: %s", cmd, err)
- return None
- return if_result
-
- @staticmethod
- def get_ifconfig_ether():
- cmd = ['ifconfig', '-l', 'ether']
- (nics, err) = util.subp(cmd, rcs=[0, 1])
- if len(err):
- LOG.warning("Error running %s: %s", cmd, err)
- return None
- return nics
-
- @staticmethod
- def get_interface_mac(ifname):
- if_result = Distro.get_ifconfig_ifname_out(ifname)
- for item in if_result.splitlines():
- if item.find('ether ') != -1:
- mac = str(item.split()[1])
- if mac:
- return mac
-
- @staticmethod
- def get_devicelist():
- nics = Distro.get_ifconfig_list()
- return nics.split()
-
- @staticmethod
- def get_ipv6():
- ipv6 = []
- nics = Distro.get_devicelist()
- for nic in nics:
- if_result = Distro.get_ifconfig_ifname_out(nic)
- for item in if_result.splitlines():
- if item.find("inet6 ") != -1 and item.find("scopeid") == -1:
- ipv6.append(nic)
- return ipv6
-
- def get_ipv4(self):
- ipv4 = []
- nics = Distro.get_devicelist()
- for nic in nics:
- if_result = Distro.get_ifconfig_ifname_out(nic)
- for item in if_result.splitlines():
- print(item)
- if self.ipv4_pat.match(item):
- ipv4.append(nic)
- return ipv4
-
- def is_up(self, ifname):
- if_result = Distro.get_ifconfig_ifname_out(ifname)
- pat = "^" + ifname
- for item in if_result.splitlines():
- if re.match(pat, item):
- flags = item.split('<')[1].split('>')[0]
- if flags.find("UP") != -1:
- return True
-
- def _get_current_rename_info(self, check_downable=True):
- """Collect information necessary for rename_interfaces."""
- names = Distro.get_devicelist()
- bymac = {}
- for n in names:
- bymac[Distro.get_interface_mac(n)] = {
- 'name': n, 'up': self.is_up(n), 'downable': None}
-
- nics_with_addresses = set()
- if check_downable:
- nics_with_addresses = set(self.get_ipv4() + self.get_ipv6())
-
- for d in bymac.values():
- d['downable'] = (d['up'] is False or
- d['name'] not in nics_with_addresses)
-
- return bymac
-
- def _rename_interfaces(self, renames):
- if not len(renames):
- LOG.debug("no interfaces to rename")
- return
-
- current_info = self._get_current_rename_info()
-
- cur_bymac = {}
- for mac, data in current_info.items():
- cur = data.copy()
- cur['mac'] = mac
- cur_bymac[mac] = cur
-
- def update_byname(bymac):
- return dict((data['name'], data)
- for data in bymac.values())
-
- def rename(cur, new):
- util.subp(["ifconfig", cur, "name", new], capture=True)
-
- def down(name):
- util.subp(["ifconfig", name, "down"], capture=True)
-
- def up(name):
- util.subp(["ifconfig", name, "up"], capture=True)
-
- ops = []
- errors = []
- ups = []
- cur_byname = update_byname(cur_bymac)
- tmpname_fmt = "cirename%d"
- tmpi = -1
-
- for mac, new_name in renames:
- cur = cur_bymac.get(mac, {})
- cur_name = cur.get('name')
- cur_ops = []
- if cur_name == new_name:
- # nothing to do
- continue
-
- if not cur_name:
- errors.append("[nic not present] Cannot rename mac=%s to %s"
- ", not available." % (mac, new_name))
- continue
-
- if cur['up']:
- msg = "[busy] Error renaming mac=%s from %s to %s"
- if not cur['downable']:
- errors.append(msg % (mac, cur_name, new_name))
- continue
- cur['up'] = False
- cur_ops.append(("down", mac, new_name, (cur_name,)))
- ups.append(("up", mac, new_name, (new_name,)))
-
- if new_name in cur_byname:
- target = cur_byname[new_name]
- if target['up']:
- msg = "[busy-target] Error renaming mac=%s from %s to %s."
- if not target['downable']:
- errors.append(msg % (mac, cur_name, new_name))
- continue
- else:
- cur_ops.append(("down", mac, new_name, (new_name,)))
-
- tmp_name = None
- while tmp_name is None or tmp_name in cur_byname:
- tmpi += 1
- tmp_name = tmpname_fmt % tmpi
-
- cur_ops.append(("rename", mac, new_name, (new_name, tmp_name)))
- target['name'] = tmp_name
- cur_byname = update_byname(cur_bymac)
- if target['up']:
- ups.append(("up", mac, new_name, (tmp_name,)))
-
- cur_ops.append(("rename", mac, new_name, (cur['name'], new_name)))
- cur['name'] = new_name
- cur_byname = update_byname(cur_bymac)
- ops += cur_ops
-
- opmap = {'rename': rename, 'down': down, 'up': up}
- if len(ops) + len(ups) == 0:
- if len(errors):
- LOG.debug("unable to do any work for renaming of %s", renames)
- else:
- LOG.debug("no work necessary for renaming of %s", renames)
- else:
- LOG.debug("achieving renaming of %s with ops %s",
- renames, ops + ups)
-
- for op, mac, new_name, params in ops + ups:
- try:
- opmap.get(op)(*params)
- except Exception as e:
- errors.append(
- "[unknown] Error performing %s%s for %s, %s: %s" %
- (op, params, mac, new_name, e))
- if len(errors):
- raise Exception('\n'.join(errors))
-
- def apply_network_config_names(self, netcfg):
- renames = []
- for ent in netcfg.get('config', {}):
- if ent.get('type') != 'physical':
- continue
- mac = ent.get('mac_address')
- name = ent.get('name')
- if not mac:
- continue
- renames.append([mac, name])
- return self._rename_interfaces(renames)
-
- @classmethod
def generate_fallback_config(self):
- nics = Distro.get_ifconfig_ether()
- if nics is None:
- LOG.debug("Fail to get network interfaces")
- return None
- potential_interfaces = nics.split()
- connected = []
- for nic in potential_interfaces:
- pat = "^" + nic
- if_result = Distro.get_ifconfig_ifname_out(nic)
- for item in if_result.split("\n"):
- if re.match(pat, item):
- flags = item.split('<')[1].split('>')[0]
- if flags.find("RUNNING") != -1:
- connected.append(nic)
- if connected:
- potential_interfaces = connected
- names = list(sorted(potential_interfaces))
- default_pri_nic = Distro.default_primary_nic
- if default_pri_nic in names:
- names.remove(default_pri_nic)
- names.insert(0, default_pri_nic)
- target_name = None
- target_mac = None
- for name in names:
- mac = Distro.get_interface_mac(name)
- if mac:
- target_name = name
- target_mac = mac
- break
- if target_mac and target_name:
- nconf = {'config': [], 'version': 1}
+ nconf = {'config': [], 'version': 1}
+ for mac, name in net.get_interfaces_by_mac().items():
nconf['config'].append(
- {'type': 'physical', 'name': target_name,
- 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]})
- return nconf
- else:
- return None
-
- def _write_network(self, settings):
- entries = net_util.translate_network(settings)
- nameservers = []
- searchdomains = []
- dev_names = entries.keys()
- for (device, info) in entries.items():
- # Skip the loopback interface.
- if device.startswith('lo'):
- continue
-
- dev = self.getnetifname(device)
-
- LOG.info('Configuring interface %s', dev)
-
- if info.get('bootproto') == 'static':
- LOG.debug('Configuring dev %s with %s / %s', dev,
- info.get('address'), info.get('netmask'))
- # Configure an ipv4 address.
- ifconfig = (info.get('address') + ' netmask ' +
- info.get('netmask'))
-
- # Configure the gateway.
- self.updatercconf('defaultrouter', info.get('gateway'))
-
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
- if 'dns-search' in info:
- searchdomains.extend(info['dns-search'])
- else:
- ifconfig = 'DHCP'
-
- self.updatercconf('ifconfig_' + dev, ifconfig)
-
- # Try to read the /etc/resolv.conf or just start from scratch if that
- # fails.
- try:
- resolvconf = ResolvConf(util.load_file(self.resolv_conf_fn))
- resolvconf.parse()
- except IOError:
- util.logexc(LOG, "Failed to parse %s, use new empty file",
- self.resolv_conf_fn)
- resolvconf = ResolvConf('')
- resolvconf.parse()
-
- # Add some nameservers
- for server in nameservers:
- try:
- resolvconf.add_nameserver(server)
- except ValueError:
- util.logexc(LOG, "Failed to add nameserver %s", server)
-
- # And add any searchdomains.
- for domain in searchdomains:
- try:
- resolvconf.add_search_domain(domain)
- except ValueError:
- util.logexc(LOG, "Failed to add search domain %s", domain)
- util.write_file(self.resolv_conf_fn, str(resolvconf), 0o644)
+ {'type': 'physical', 'name': name,
+ 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]})
+ return nconf
- return dev_names
+ def _write_network_config(self, netconfig):
+ return self._supported_write_network_config(netconfig)
def apply_locale(self, locale, out_fn=None):
# Adjust the locals value to the new value
@@ -612,18 +223,12 @@ class Distro(distros.Distro):
util.logexc(LOG, "Failed to restore %s backup",
self.login_conf_fn)
- def _bring_up_interface(self, device_name):
- if device_name.startswith('lo'):
- return
- dev = self.getnetifname(device_name)
- cmd = ['/etc/rc.d/netif', 'start', dev]
- LOG.debug("Attempting to bring up interface %s using command %s",
- dev, cmd)
- # This could return 1 when the interface has already been put UP by the
- # OS. This is just fine.
- (_out, err) = util.subp(cmd, rcs=[0, 1])
- if len(err):
- LOG.warning("Error running %s: %s", cmd, err)
+ def apply_network_config_names(self, netconfig):
+        # This is handled by the freebsd network renderer, which writes a
+        # line in /etc/rc.conf with the following format:
+ # ifconfig_OLDNAME_name=NEWNAME
+ # FreeBSD network script will rename the interface automatically.
+ return
def install_packages(self, pkglist):
self.update_package_sources()
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index bd806378..1d5eb535 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -307,6 +307,9 @@ def device_devid(devname):
def get_devicelist():
+ if util.is_FreeBSD():
+ return list(get_interfaces_by_mac().values())
+
try:
devs = os.listdir(get_sys_class_path())
except OSError as e:
@@ -329,6 +332,35 @@ def is_disabled_cfg(cfg):
def find_fallback_nic(blacklist_drivers=None):
"""Return the name of the 'fallback' network device."""
+ if util.is_FreeBSD():
+ return find_fallback_nic_on_freebsd(blacklist_drivers)
+ else:
+ return find_fallback_nic_on_linux(blacklist_drivers)
+
+
+def find_fallback_nic_on_freebsd(blacklist_drivers=None):
+ """Return the name of the 'fallback' network device on FreeBSD.
+
+ @param blacklist_drivers: currently ignored
+ @return default interface, or None
+
+
+    We use the first interface reported by ``ifconfig -l -u ether``.
+ """
+ stdout, _stderr = util.subp(['ifconfig', '-l', '-u', 'ether'])
+ values = stdout.split()
+ if values:
+ return values[0]
+ # On FreeBSD <= 10, 'ifconfig -l' ignores the interfaces with DOWN
+ # status
+ values = list(get_interfaces_by_mac().values())
+ values.sort()
+ if values:
+ return values[0]
+
+
+def find_fallback_nic_on_linux(blacklist_drivers=None):
+ """Return the name of the 'fallback' network device on Linux."""
if not blacklist_drivers:
blacklist_drivers = []
@@ -765,6 +797,40 @@ def get_ib_interface_hwaddr(ifname, ethernet_format):
def get_interfaces_by_mac():
+ if util.is_FreeBSD():
+ return get_interfaces_by_mac_on_freebsd()
+ else:
+ return get_interfaces_by_mac_on_linux()
+
+
+def get_interfaces_by_mac_on_freebsd():
+ (out, _) = util.subp(['ifconfig', '-a', 'ether'])
+
+    # Flatten each interface block into a single line.
+ def flatten(out):
+ curr_block = ''
+ for l in out.split('\n'):
+ if l.startswith('\t'):
+ curr_block += l
+ else:
+ if curr_block:
+ yield curr_block
+ curr_block = l
+ yield curr_block
+
+    # Look for the interface name and MAC in a list of flattened blocks.
+ def find_mac(flat_list):
+ for block in flat_list:
+ m = re.search(
+ r"^(?P<ifname>\S*): .*ether\s(?P<mac>[\da-f:]{17}).*",
+ block)
+ if m:
+ yield (m.group('mac'), m.group('ifname'))
+ results = {mac: ifname for mac, ifname in find_mac(flatten(out))}
+ return results
+
+
+def get_interfaces_by_mac_on_linux():
"""Build a dictionary of tuples {mac: name}.
Bridges and any devices that have a 'stolen' mac are excluded."""
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 17379918..c033cc8e 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -92,9 +92,12 @@ class EphemeralDHCPv4(object):
nmap = {'interface': 'interface', 'ip': 'fixed-address',
'prefix_or_mask': 'subnet-mask',
'broadcast': 'broadcast-address',
- 'static_routes': 'rfc3442-classless-static-routes',
+ 'static_routes': [
+ 'rfc3442-classless-static-routes',
+ 'classless-static-routes'
+ ],
'router': 'routers'}
- kwargs = dict([(k, self.lease.get(v)) for k, v in nmap.items()])
+ kwargs = self.extract_dhcp_options_mapping(nmap)
if not kwargs['broadcast']:
kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip'])
if kwargs['static_routes']:
@@ -107,6 +110,25 @@ class EphemeralDHCPv4(object):
self._ephipv4 = ephipv4
return self.lease
+ def extract_dhcp_options_mapping(self, nmap):
+ result = {}
+ for internal_reference, lease_option_names in nmap.items():
+ if isinstance(lease_option_names, list):
+ self.get_first_option_value(
+ internal_reference,
+ lease_option_names,
+ result
+ )
+ else:
+ result[internal_reference] = self.lease.get(lease_option_names)
+ return result
+
+ def get_first_option_value(self, internal_mapping,
+ lease_option_names, result):
+ for different_names in lease_option_names:
+ if not result.get(internal_mapping):
+ result[internal_mapping] = self.lease.get(different_names)
+
def maybe_perform_dhcp_discovery(nic=None):
"""Perform dhcp discovery if nic valid and dhclient command exists.
@@ -281,24 +303,30 @@ def parse_static_routes(rfc3442):
""" parse rfc3442 format and return a list containing tuple of strings.
The tuple is composed of the network_address (including net length) and
- gateway for a parsed static route.
+ gateway for a parsed static route. It can parse two formats of rfc3442,
+ one from dhcpcd and one from dhclient (isc).
- @param rfc3442: string in rfc3442 format
+    @param rfc3442: string in rfc3442 format (isc or dhcpcd)
@returns: list of tuple(str, str) for all valid parsed routes until the
first parsing error.
E.g.
- sr = parse_state_routes("32,169,254,169,254,130,56,248,255,0,130,56,240,1")
- sr = [
+ sr=parse_static_routes("32,169,254,169,254,130,56,248,255,0,130,56,240,1")
+ sr=[
("169.254.169.254/32", "130.56.248.255"), ("0.0.0.0/0", "130.56.240.1")
]
+ sr2 = parse_static_routes("24.191.168.128 192.168.128.1,0 192.168.128.1")
+ sr2 = [
+ ("191.168.128.0/24", "192.168.128.1"), ("0.0.0.0/0", "192.168.128.1")
+ ]
+
Python version of isc-dhclient's hooks:
/etc/dhcp/dhclient-exit-hooks.d/rfc3442-classless-routes
"""
# raw strings from dhcp lease may end in semi-colon
rfc3442 = rfc3442.rstrip(";")
- tokens = rfc3442.split(',')
+ tokens = [tok for tok in re.split(r"[, .]", rfc3442) if tok]
static_routes = []
def _trunc_error(cidr, required, remain):
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index 70771060..2f714563 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -429,7 +429,9 @@ class Renderer(renderer.Renderer):
iface['mode'] = 'auto'
# Use stateless DHCPv6 (0=off, 1=on)
iface['dhcp'] = '0'
- elif subnet_is_ipv6(subnet) and subnet['type'] == 'static':
+ elif subnet_is_ipv6(subnet):
+            # The subnet type may be 'static6', but ENI uses 'static'.
+ iface['mode'] = 'static'
if accept_ra is not None:
# Accept router advertisements (0=off, 1=on)
iface['accept_ra'] = '1' if accept_ra else '0'
diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py
new file mode 100644
index 00000000..d6f61da3
--- /dev/null
+++ b/cloudinit/net/freebsd.py
@@ -0,0 +1,175 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import re
+
+from cloudinit import log as logging
+from cloudinit import net
+from cloudinit import util
+from cloudinit.distros import rhel_util
+from cloudinit.distros.parsers.resolv_conf import ResolvConf
+
+from . import renderer
+
+LOG = logging.getLogger(__name__)
+
+
+class Renderer(renderer.Renderer):
+ resolv_conf_fn = 'etc/resolv.conf'
+ rc_conf_fn = 'etc/rc.conf'
+
+ def __init__(self, config=None):
+ if not config:
+ config = {}
+ self.dhcp_interfaces = []
+ self._postcmds = config.get('postcmds', True)
+
+ def _update_rc_conf(self, settings, target=None):
+ fn = util.target_path(target, self.rc_conf_fn)
+ rhel_util.update_sysconfig_file(fn, settings)
+
+ def _write_ifconfig_entries(self, settings, target=None):
+ ifname_by_mac = net.get_interfaces_by_mac()
+ for interface in settings.iter_interfaces():
+ device_name = interface.get("name")
+ device_mac = interface.get("mac_address")
+ if device_name and re.match(r'^lo\d+$', device_name):
+ continue
+ if device_mac not in ifname_by_mac:
+ LOG.info('Cannot find any device with MAC %s', device_mac)
+ elif device_mac and device_name:
+ cur_name = ifname_by_mac[device_mac]
+ if cur_name != device_name:
+ LOG.info('netif service will rename interface %s to %s',
+ cur_name, device_name)
+ self._update_rc_conf(
+ {'ifconfig_%s_name' % cur_name: device_name},
+ target=target)
+ else:
+ device_name = ifname_by_mac[device_mac]
+
+ LOG.info('Configuring interface %s', device_name)
+ ifconfig = 'DHCP' # default
+
+ for subnet in interface.get("subnets", []):
+ if ifconfig != 'DHCP':
+ LOG.info('The FreeBSD provider only set the first subnet.')
+ break
+ if subnet.get('type') == 'static':
+ if not subnet.get('netmask'):
+ LOG.debug(
+ 'Skipping IP %s, because there is no netmask',
+ subnet.get('address'))
+ continue
+ LOG.debug('Configuring dev %s with %s / %s', device_name,
+ subnet.get('address'), subnet.get('netmask'))
+ # Configure an ipv4 address.
+ ifconfig = (
+ subnet.get('address') + ' netmask ' +
+ subnet.get('netmask'))
+
+ if ifconfig == 'DHCP':
+ self.dhcp_interfaces.append(device_name)
+ self._update_rc_conf(
+ {'ifconfig_' + device_name: ifconfig},
+ target=target)
+
+ def _write_route_entries(self, settings, target=None):
+ routes = list(settings.iter_routes())
+ for interface in settings.iter_interfaces():
+ subnets = interface.get("subnets", [])
+ for subnet in subnets:
+ if subnet.get('type') != 'static':
+ continue
+ gateway = subnet.get('gateway')
+ if gateway and len(gateway.split('.')) == 4:
+ routes.append({
+ 'network': '0.0.0.0',
+ 'netmask': '0.0.0.0',
+ 'gateway': gateway})
+ routes += subnet.get('routes', [])
+ route_cpt = 0
+ for route in routes:
+ network = route.get('network')
+ if not network:
+ LOG.debug('Skipping a bad route entry')
+ continue
+ netmask = route.get('netmask')
+ gateway = route.get('gateway')
+ route_cmd = "-route %s/%s %s" % (network, netmask, gateway)
+ if network == '0.0.0.0':
+ self._update_rc_conf(
+ {'defaultrouter': gateway}, target=target)
+ else:
+ self._update_rc_conf(
+ {'route_net%d' % route_cpt: route_cmd}, target=target)
+ route_cpt += 1
+
+ def _write_resolve_conf(self, settings, target=None):
+ nameservers = settings.dns_nameservers
+ searchdomains = settings.dns_searchdomains
+ for interface in settings.iter_interfaces():
+ for subnet in interface.get("subnets", []):
+ if 'dns_nameservers' in subnet:
+ nameservers.extend(subnet['dns_nameservers'])
+ if 'dns_search' in subnet:
+ searchdomains.extend(subnet['dns_search'])
+ # Try to read the /etc/resolv.conf or just start from scratch if that
+ # fails.
+ try:
+ resolvconf = ResolvConf(util.load_file(util.target_path(
+ target, self.resolv_conf_fn)))
+ resolvconf.parse()
+ except IOError:
+ util.logexc(LOG, "Failed to parse %s, use new empty file",
+ util.target_path(target, self.resolv_conf_fn))
+ resolvconf = ResolvConf('')
+ resolvconf.parse()
+
+ # Add some nameservers
+ for server in nameservers:
+ try:
+ resolvconf.add_nameserver(server)
+ except ValueError:
+ util.logexc(LOG, "Failed to add nameserver %s", server)
+
+ # And add any searchdomains.
+ for domain in searchdomains:
+ try:
+ resolvconf.add_search_domain(domain)
+ except ValueError:
+ util.logexc(LOG, "Failed to add search domain %s", domain)
+ util.write_file(
+ util.target_path(target, self.resolv_conf_fn),
+ str(resolvconf), 0o644)
+
+ def _write_network(self, settings, target=None):
+ self._write_ifconfig_entries(settings, target=target)
+ self._write_route_entries(settings, target=target)
+ self._write_resolve_conf(settings, target=target)
+
+ self.start_services(run=self._postcmds)
+
+ def render_network_state(self, network_state, templates=None, target=None):
+ self._write_network(network_state, target=target)
+
+ def start_services(self, run=False):
+ if not run:
+ LOG.debug("freebsd generate postcmd disabled")
+ return
+
+ util.subp(['service', 'netif', 'restart'], capture=True)
+ # On FreeBSD 10, the restart of routing and dhclient is likely to fail
+ # because
+ # - routing: it cannot remove the loopback route, but it will still set
+ # up the default route as expected.
+ # - dhclient: it cannot stop the dhclient started by the netif service.
+    # In both cases, the situation is OK, and we can proceed.
+ util.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1])
+ for dhcp_interface in self.dhcp_interfaces:
+ util.subp(['service', 'dhclient', 'restart', dhcp_interface],
+ rcs=[0, 1],
+ capture=True)
+
+
+def available(target=None):
+ return util.is_FreeBSD()
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 14d3999f..89855270 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -98,7 +98,7 @@ def _extract_addresses(config, entry, ifname, features=None):
entry.update({sn_type: True})
elif sn_type in IPV6_DYNAMIC_TYPES:
entry.update({'dhcp6': True})
- elif sn_type in ['static']:
+ elif sn_type in ['static', 'static6']:
addr = "%s" % subnet.get('address')
if 'prefix' in subnet:
addr += "/%d" % subnet.get('prefix')
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 7d206a1a..9b126100 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -73,7 +73,7 @@ def parse_net_config_data(net_config, skip_broken=True):
# pass the whole net-config as-is
config = net_config
- if version and config:
+ if version and config is not None:
nsi = NetworkStateInterpreter(version=version, config=config)
nsi.parse_config(skip_broken=skip_broken)
state = nsi.get_network_state()
@@ -941,7 +941,7 @@ def subnet_is_ipv6(subnet):
# 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful', 'ipv6_dhcpv6-stateless' or
# 'ipv6_slaac'
if subnet['type'].endswith('6') or subnet['type'] in IPV6_DYNAMIC_TYPES:
- # This is a request for DHCPv6.
+        # This is either a request for a static6 type or for DHCPv6.
return True
elif subnet['type'] == 'static' and is_ipv6_addr(subnet.get('address')):
return True
diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py
index 5117b4a5..b98dbbe3 100644
--- a/cloudinit/net/renderers.py
+++ b/cloudinit/net/renderers.py
@@ -1,17 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
from . import eni
+from . import freebsd
from . import netplan
from . import RendererNotFoundError
from . import sysconfig
NAME_TO_RENDERER = {
"eni": eni,
+ "freebsd": freebsd,
"netplan": netplan,
"sysconfig": sysconfig,
}
-DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan"]
+DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd"]
def search(priority=None, target=None, first=False):
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 310cdf01..3e06af01 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -367,7 +367,7 @@ class Renderer(renderer.Renderer):
iface_cfg['IPV6_AUTOCONF'] = True
elif subnet_type in ['dhcp4', 'dhcp']:
iface_cfg['BOOTPROTO'] = 'dhcp'
- elif subnet_type == 'static':
+ elif subnet_type in ['static', 'static6']:
# grep BOOTPROTO sysconfig.txt -A2 | head -3
# BOOTPROTO=none|bootp|dhcp
# 'bootp' or 'dhcp' cause a DHCP client
@@ -419,7 +419,7 @@ class Renderer(renderer.Renderer):
continue
elif subnet_type in IPV6_DYNAMIC_TYPES:
continue
- elif subnet_type == 'static':
+ elif subnet_type in ['static', 'static6']:
if subnet_is_ipv6(subnet):
ipv6_index = ipv6_index + 1
ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix'])
diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py
index 91f503c9..c3fa1e04 100644
--- a/cloudinit/net/tests/test_dhcp.py
+++ b/cloudinit/net/tests/test_dhcp.py
@@ -90,6 +90,32 @@ class TestDHCPRFC3442(CiTestCase):
write_file(lease_file, content)
self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file))
+ def test_parse_lease_finds_classless_static_routes(self):
+ """
+ parse_dhcp_lease_file returns classless-static-routes
+ for CentOS lease format.
+ """
+ lease_file = self.tmp_path('leases')
+ content = dedent("""
+ lease {
+ interface "wlp3s0";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ option classless-static-routes 0 130.56.240.1;
+ renew 4 2017/07/27 18:02:30;
+ expire 5 2017/07/28 07:08:15;
+ }
+ """)
+ expected = [
+ {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
+ 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1',
+ 'classless-static-routes': '0 130.56.240.1',
+ 'renew': '4 2017/07/27 18:02:30',
+ 'expire': '5 2017/07/28 07:08:15'}]
+ write_file(lease_file, content)
+ self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file))
+
@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
def test_obtain_lease_parses_static_routes(self, m_maybe, m_ipv4):
@@ -112,6 +138,31 @@ class TestDHCPRFC3442(CiTestCase):
'router': '192.168.2.1'}
m_ipv4.assert_called_with(**expected_kwargs)
+ @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
+ @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+ def test_obtain_centos_lease_parses_static_routes(self, m_maybe, m_ipv4):
+ """
+ EphemeralDHCPv4 parses rfc3442 routes for EphemeralIPv4Network
+ for CentOS lease format
+ """
+ lease = [
+ {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
+ 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1',
+ 'classless-static-routes': '0 130.56.240.1',
+ 'renew': '4 2017/07/27 18:02:30',
+ 'expire': '5 2017/07/28 07:08:15'}]
+ m_maybe.return_value = lease
+ eph = net.dhcp.EphemeralDHCPv4()
+ eph.obtain_lease()
+ expected_kwargs = {
+ 'interface': 'wlp3s0',
+ 'ip': '192.168.2.74',
+ 'prefix_or_mask': '255.255.255.0',
+ 'broadcast': '192.168.2.255',
+ 'static_routes': [('0.0.0.0/0', '130.56.240.1')],
+ 'router': '192.168.2.1'}
+ m_ipv4.assert_called_with(**expected_kwargs)
+
class TestDHCPParseStaticRoutes(CiTestCase):
@@ -181,6 +232,20 @@ class TestDHCPParseStaticRoutes(CiTestCase):
logs = self.logs.getvalue()
self.assertIn(rfc3442, logs.splitlines()[0])
+ def test_redhat_format(self):
+ redhat_format = "24.191.168.128 192.168.128.1,0 192.168.128.1"
+ self.assertEqual(sorted([
+ ("191.168.128.0/24", "192.168.128.1"),
+ ("0.0.0.0/0", "192.168.128.1")
+ ]), sorted(parse_static_routes(redhat_format)))
+
+ def test_redhat_format_with_a_space_too_much_after_comma(self):
+ redhat_format = "24.191.168.128 192.168.128.1, 0 192.168.128.1"
+ self.assertEqual(sorted([
+ ("191.168.128.0/24", "192.168.128.1"),
+ ("0.0.0.0/0", "192.168.128.1")
+ ]), sorted(parse_static_routes(redhat_format)))
+
class TestDHCPDiscoveryClean(CiTestCase):
with_logs = True
diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py
new file mode 100644
index 00000000..fcb4a995
--- /dev/null
+++ b/cloudinit/net/tests/test_network_state.py
@@ -0,0 +1,47 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import mock
+from cloudinit.net import network_state
+from cloudinit.tests.helpers import CiTestCase
+
+netstate_path = 'cloudinit.net.network_state'
+
+
+class TestNetworkStateParseConfig(CiTestCase):
+
+ def setUp(self):
+ super(TestNetworkStateParseConfig, self).setUp()
+ nsi_path = netstate_path + '.NetworkStateInterpreter'
+ self.add_patch(nsi_path, 'm_nsi')
+
+ def test_missing_version_returns_none(self):
+ ncfg = {}
+ self.assertEqual(None, network_state.parse_net_config_data(ncfg))
+
+ def test_unknown_versions_returns_none(self):
+ ncfg = {'version': 13.2}
+ self.assertEqual(None, network_state.parse_net_config_data(ncfg))
+
+ def test_version_2_passes_self_as_config(self):
+ ncfg = {'version': 2, 'otherconfig': {}, 'somemore': [1, 2, 3]}
+ network_state.parse_net_config_data(ncfg)
+ self.assertEqual([mock.call(version=2, config=ncfg)],
+ self.m_nsi.call_args_list)
+
+ def test_valid_config_gets_network_state(self):
+ ncfg = {'version': 2, 'otherconfig': {}, 'somemore': [1, 2, 3]}
+ result = network_state.parse_net_config_data(ncfg)
+ self.assertNotEqual(None, result)
+
+ def test_empty_v1_config_gets_network_state(self):
+ ncfg = {'version': 1, 'config': []}
+ result = network_state.parse_net_config_data(ncfg)
+ self.assertNotEqual(None, result)
+
+ def test_empty_v2_config_gets_network_state(self):
+ ncfg = {'version': 2}
+ result = network_state.parse_net_config_data(ncfg)
+ self.assertNotEqual(None, result)
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index e91cd263..6ba21f4d 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -358,18 +358,6 @@ def route_info():
return routes
-def getgateway():
- try:
- routes = route_info()
- except Exception:
- pass
- else:
- for r in routes.get('ipv4', []):
- if r['flags'].find("G") >= 0:
- return "%s[%s]" % (r['gateway'], r['iface'])
- return None
-
-
def netdev_pformat():
lines = []
empty = "."
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 2060d81f..ca4ffa8e 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -40,6 +40,7 @@ CFG_BUILTIN = {
'IBMCloud',
'Oracle',
'Exoscale',
+ 'RbxCloud',
# At the end to act as a 'catch' when none of the above work...
'None',
],
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 87a848ce..61ec522a 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -33,7 +33,8 @@ from cloudinit.sources.helpers.azure import (
get_boot_telemetry,
get_system_info,
report_diagnostic_event,
- EphemeralDHCPv4WithReporting)
+ EphemeralDHCPv4WithReporting,
+ is_byte_swapped)
LOG = logging.getLogger(__name__)
@@ -354,16 +355,16 @@ class DataSourceAzure(sources.DataSource):
for pk in self.cfg.get('_pubkeys', []):
if pk.get('value', None):
key_value = pk['value']
- LOG.debug("ssh authentication: using value from fabric")
+ LOG.debug("SSH authentication: using value from fabric")
else:
bname = str(pk['fingerprint'] + ".crt")
fp_files += [os.path.join(ddir, bname)]
- LOG.debug("ssh authentication: "
+ LOG.debug("SSH authentication: "
"using fingerprint from fabric")
with events.ReportEventStack(
name="waiting-for-ssh-public-key",
- description="wait for agents to retrieve ssh keys",
+ description="wait for agents to retrieve SSH keys",
parent=azure_ds_reporter):
# wait very long for public SSH keys to arrive
# https://bugs.launchpad.net/cloud-init/+bug/1717611
@@ -471,8 +472,7 @@ class DataSourceAzure(sources.DataSource):
seed = _get_random_seed()
if seed:
crawled_data['metadata']['random_seed'] = seed
- crawled_data['metadata']['instance-id'] = util.read_dmi_data(
- 'system-uuid')
+ crawled_data['metadata']['instance-id'] = self._iid()
if perform_reprovision:
LOG.info("Reporting ready to Azure after getting ReprovisionData")
@@ -558,6 +558,16 @@ class DataSourceAzure(sources.DataSource):
# quickly (local check only) if self.instance_id is still valid
return sources.instance_id_matches_system_uuid(self.get_instance_id())
+ def _iid(self, previous=None):
+ prev_iid_path = os.path.join(
+ self.paths.get_cpath('data'), 'instance-id')
+ iid = util.read_dmi_data('system-uuid')
+ if os.path.exists(prev_iid_path):
+ previous = util.load_file(prev_iid_path).strip()
+ if is_byte_swapped(previous, iid):
+ return previous
+ return iid
+
@azure_ds_telemetry_reporter
def setup(self, is_new_instance):
if self._negotiated is False:
diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index 21e6ae6b..e0c714e8 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -83,7 +83,7 @@ creates 6 boot scenarios.
There is no information available to identify this scenario.
- The user will be able to ssh in as as root with their public keys that
+ The user will be able to SSH in as root with their public keys that
have been installed into /root/ssh/.authorized_keys
during the provisioning stage.
diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py
index 9a8c3d5c..c3cd5c79 100644
--- a/cloudinit/sources/DataSourceRbxCloud.py
+++ b/cloudinit/sources/DataSourceRbxCloud.py
@@ -197,6 +197,7 @@ def read_user_data_callback(mount_dir):
class DataSourceRbxCloud(sources.DataSource):
+ dsname = "RbxCloud"
update_events = {'network': [
EventType.BOOT_NEW_INSTANCE,
EventType.BOOT
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index f5cdb3fd..fc760581 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -7,6 +7,7 @@ import re
import socket
import struct
import time
+import textwrap
from cloudinit.net import dhcp
from cloudinit import stages
@@ -48,6 +49,32 @@ def azure_ds_telemetry_reporter(func):
return impl
+def is_byte_swapped(previous_id, current_id):
+ """
+ Azure stores the instance ID with an incorrect byte ordering for the
+ first three parts. This corrects the byte order such that it is consistent with
+ that returned by the metadata service.
+ """
+ if previous_id == current_id:
+ return False
+
+ def swap_bytestring(s, width=2):
+ dd = [byte for byte in textwrap.wrap(s, 2)]
+ dd.reverse()
+ return ''.join(dd)
+
+ parts = current_id.split('-')
+ swapped_id = '-'.join([
+ swap_bytestring(parts[0]),
+ swap_bytestring(parts[1]),
+ swap_bytestring(parts[2]),
+ parts[3],
+ parts[4]
+ ])
+
+ return previous_id == swapped_id
+
+
@azure_ds_telemetry_reporter
def get_boot_telemetry():
"""Report timestamps related to kernel initialization and systemd
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 3f99b58c..c3a9b5b7 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -17,7 +17,7 @@ LOG = logging.getLogger(__name__)
# See: man sshd_config
DEF_SSHD_CFG = "/etc/ssh/sshd_config"
-# taken from openssh source openssh-7.3p1/sshkey.c:
+# taken from OpenSSH source openssh-7.3p1/sshkey.c:
# static const struct keytype keytypes[] = { ... }
VALID_KEY_TYPES = (
"dsa",
@@ -160,19 +160,19 @@ class AuthKeyLineParser(object):
comment=comment, options=options)
-def parse_authorized_keys(fname):
+def parse_authorized_keys(fnames):
lines = []
- try:
- if os.path.isfile(fname):
- lines = util.load_file(fname).splitlines()
- except (IOError, OSError):
- util.logexc(LOG, "Error reading lines from %s", fname)
- lines = []
-
parser = AuthKeyLineParser()
contents = []
- for line in lines:
- contents.append(parser.parse(line))
+ for fname in fnames:
+ try:
+ if os.path.isfile(fname):
+ lines = util.load_file(fname).splitlines()
+ for line in lines:
+ contents.append(parser.parse(line))
+ except (IOError, OSError):
+ util.logexc(LOG, "Error reading lines from %s", fname)
+
return contents
@@ -207,36 +207,50 @@ def update_authorized_keys(old_entries, keys):
def users_ssh_info(username):
pw_ent = pwd.getpwnam(username)
if not pw_ent or not pw_ent.pw_dir:
- raise RuntimeError("Unable to get ssh info for user %r" % (username))
+ raise RuntimeError("Unable to get SSH info for user %r" % (username))
return (os.path.join(pw_ent.pw_dir, '.ssh'), pw_ent)
-def extract_authorized_keys(username):
+def render_authorizedkeysfile_paths(value, homedir, username):
+ # The 'AuthorizedKeysFile' may contain tokens
+ # of the form %T which are substituted during connection set-up.
+ # The following tokens are defined: %% is replaced by a literal
+ # '%', %h is replaced by the home directory of the user being
+ # authenticated and %u is replaced by the username of that user.
+ macros = (("%h", homedir), ("%u", username), ("%%", "%"))
+ if not value:
+ value = "%h/.ssh/authorized_keys"
+ paths = value.split()
+ rendered = []
+ for path in paths:
+ for macro, field in macros:
+ path = path.replace(macro, field)
+ if not path.startswith("/"):
+ path = os.path.join(homedir, path)
+ rendered.append(path)
+ return rendered
+
+
+def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG):
(ssh_dir, pw_ent) = users_ssh_info(username)
- auth_key_fn = None
+ default_authorizedkeys_file = os.path.join(ssh_dir, 'authorized_keys')
+ auth_key_fns = []
with util.SeLinuxGuard(ssh_dir, recursive=True):
try:
- # The 'AuthorizedKeysFile' may contain tokens
- # of the form %T which are substituted during connection set-up.
- # The following tokens are defined: %% is replaced by a literal
- # '%', %h is replaced by the home directory of the user being
- # authenticated and %u is replaced by the username of that user.
- ssh_cfg = parse_ssh_config_map(DEF_SSHD_CFG)
- auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip()
- if not auth_key_fn:
- auth_key_fn = "%h/.ssh/authorized_keys"
- auth_key_fn = auth_key_fn.replace("%h", pw_ent.pw_dir)
- auth_key_fn = auth_key_fn.replace("%u", username)
- auth_key_fn = auth_key_fn.replace("%%", '%')
- if not auth_key_fn.startswith('/'):
- auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn)
+ ssh_cfg = parse_ssh_config_map(sshd_cfg_file)
+ auth_key_fns = render_authorizedkeysfile_paths(
+ ssh_cfg.get("authorizedkeysfile", "%h/.ssh/authorized_keys"),
+ pw_ent.pw_dir, username)
+
except (IOError, OSError):
# Give up and use a default key filename
- auth_key_fn = os.path.join(ssh_dir, 'authorized_keys')
- util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in ssh "
+ auth_key_fns[0] = default_authorizedkeys_file
+ util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in SSH "
"config from %r, using 'AuthorizedKeysFile' file "
- "%r instead", DEF_SSHD_CFG, auth_key_fn)
- return (auth_key_fn, parse_authorized_keys(auth_key_fn))
+ "%r instead", DEF_SSHD_CFG, auth_key_fns[0])
+
+ # always store all the keys in the user's private file
+ return (default_authorizedkeys_file, parse_authorized_keys(auth_key_fns))
def setup_user_keys(keys, username, options=None):
@@ -335,7 +349,7 @@ def update_ssh_config(updates, fname=DEF_SSHD_CFG):
def update_ssh_config_lines(lines, updates):
- """Update the ssh config lines per updates.
+ """Update the SSH config lines per updates.
@param lines: array of SshdConfigLine. This array is updated in place.
@param updates: dictionary of desired values {Option: value}
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index 64ed82ea..11f37000 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -189,6 +189,21 @@ class TestUtil(CiTestCase):
self.assertEqual(is_rw, False)
+class TestUptime(CiTestCase):
+
+ @mock.patch('cloudinit.util.boottime')
+ @mock.patch('cloudinit.util.os.path.exists')
+ @mock.patch('cloudinit.util.time.time')
+ def test_uptime_non_linux_path(self, m_time, m_exists, m_boottime):
+ boottime = 1000.0
+ uptime = 10.0
+ m_boottime.return_value = boottime
+ m_time.return_value = boottime + uptime
+ m_exists.return_value = False
+ result = util.uptime()
+ self.assertEqual(str(uptime), result)
+
+
class TestShellify(CiTestCase):
def test_input_dict_raises_type_error(self):
@@ -523,7 +538,7 @@ class TestGetLinuxDistro(CiTestCase):
self.assertEqual(
('opensuse-tumbleweed', '20180920', platform.machine()), dist)
- @mock.patch('platform.dist')
+ @mock.patch('platform.dist', create=True)
def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists):
"""Verify we get no information if os-release does not exist"""
m_platform_dist.return_value = ('', '', '')
@@ -531,7 +546,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(('', '', ''), dist)
- @mock.patch('platform.dist')
+ @mock.patch('platform.dist', create=True)
def test_get_linux_distro_no_impl(self, m_platform_dist, m_path_exists):
"""Verify we get an empty tuple when no information exists and
Exceptions are not propagated"""
@@ -540,7 +555,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(('', '', ''), dist)
- @mock.patch('platform.dist')
+ @mock.patch('platform.dist', create=True)
def test_get_linux_distro_plat_data(self, m_platform_dist, m_path_exists):
"""Verify we get the correct platform information"""
m_platform_dist.return_value = ('foo', '1.1', 'aarch64')
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 9d9d5c72..76d7db78 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -10,7 +10,6 @@
import contextlib
import copy as obj_copy
-import ctypes
import email
import glob
import grp
@@ -635,8 +634,8 @@ def get_linux_distro():
else:
dist = ('', '', '')
try:
- # Will be removed in 3.7
- dist = platform.dist() # pylint: disable=W1505
+ # Was removed in 3.8
+ dist = platform.dist() # pylint: disable=W1505,E1101
except Exception:
pass
finally:
@@ -1807,6 +1806,33 @@ def time_rfc2822():
return ts
+def boottime():
+ """Use sysctlbyname(3) via ctypes to find kern.boottime
+
+ kern.boottime is of type struct timeval. Here we create a
+ private class to easier unpack it.
+
+ @return boottime: float to be compatible with linux
+ """
+ import ctypes
+
+ NULL_BYTES = b"\x00"
+
+ class timeval(ctypes.Structure):
+ _fields_ = [
+ ("tv_sec", ctypes.c_int64),
+ ("tv_usec", ctypes.c_int64)
+ ]
+ libc = ctypes.CDLL('/lib/libc.so.7')
+ size = ctypes.c_size_t()
+ size.value = ctypes.sizeof(timeval)
+ buf = timeval()
+ if libc.sysctlbyname(b"kern.boottime" + NULL_BYTES, ctypes.byref(buf),
+ ctypes.byref(size), None, 0) != -1:
+ return buf.tv_sec + buf.tv_usec / 1000000.0
+ raise RuntimeError("Unable to retrieve kern.boottime on this system")
+
+
def uptime():
uptime_str = '??'
method = 'unknown'
@@ -1818,15 +1844,8 @@ def uptime():
uptime_str = contents.split()[0]
else:
method = 'ctypes'
- libc = ctypes.CDLL('/lib/libc.so.7')
- size = ctypes.c_size_t()
- buf = ctypes.c_int()
- size.value = ctypes.sizeof(buf)
- libc.sysctlbyname("kern.boottime", ctypes.byref(buf),
- ctypes.byref(size), None, 0)
- now = time.time()
- bootup = buf.value
- uptime_str = now - bootup
+ # This is the *BSD codepath
+ uptime_str = str(time.time() - boottime())
except Exception:
logexc(LOG, "Unable to read uptime using method: %s" % method)
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 5ecc2848..45234499 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "19.3"
+__VERSION__ = "19.4"
_PACKAGED_VERSION = '@@PACKAGED_VERSION@@'
FEATURES = [
diff --git a/config/cloud.cfg.d/README b/config/cloud.cfg.d/README
index 60702e9d..036b80bf 100644
--- a/config/cloud.cfg.d/README
+++ b/config/cloud.cfg.d/README
@@ -1,3 +1,3 @@
-# All files in this directory will be read by cloud-init
-# They are read in lexical order. Later files overwrite values in
+# All files with the '.cfg' extension in this directory will be read by
+# cloud-init. They are read in lexical order. Later files overwrite values in
# earlier files.
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index 87c37ba0..99f96ea1 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -21,8 +21,11 @@ disable_root: false
disable_root: true
{% endif %}
-{% if variant in ["centos", "fedora", "rhel"] %}
+{% if variant in ["amazon", "centos", "fedora", "rhel"] %}
mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
+{% if variant == "amazon" %}
+resize_rootfs: noblock
+{% endif %}
resize_rootfs_tmp: /dev
ssh_pwauth: 0
@@ -42,6 +45,13 @@ datasource_list: ['NoCloud', 'ConfigDrive', 'Azure', 'OpenStack', 'Ec2']
# timeout: 5 # (defaults to 50 seconds)
# max_wait: 10 # (defaults to 120 seconds)
+
+{% if variant == "amazon" %}
+# Amazon Linux relies on ec2-net-utils for network configuration
+network:
+ config: disabled
+{% endif %}
+
# The modules that run in the 'init' stage
cloud_init_modules:
- migrator
@@ -71,7 +81,6 @@ cloud_config_modules:
# this can be used by upstart jobs for 'start on cloud-config'.
- emit_upstart
- snap
- - snap_config # DEPRECATED- Drop in version 18.2
{% endif %}
- ssh-import-id
- locale
@@ -103,9 +112,6 @@ cloud_config_modules:
# The modules that run in the 'final' stage
cloud_final_modules:
-{% if variant in ["ubuntu", "unknown", "debian"] %}
- - snappy # DEPRECATED- Drop in version 18.2
-{% endif %}
- package-update-upgrade-install
{% if variant in ["ubuntu", "unknown", "debian"] %}
- fan
@@ -137,7 +143,7 @@ cloud_final_modules:
# (not accessible to handlers/transforms)
system_info:
# This will affect which distro class gets used
-{% if variant in ["arch", "centos", "debian", "fedora", "freebsd", "rhel", "suse", "ubuntu"] %}
+{% if variant in ["amazon", "arch", "centos", "debian", "fedora", "freebsd", "rhel", "suse", "ubuntu"] %}
distro: {{ variant }}
{% else %}
# Unknown/fallback distro.
@@ -185,12 +191,18 @@ system_info:
primary: http://ports.ubuntu.com/ubuntu-ports
security: http://ports.ubuntu.com/ubuntu-ports
ssh_svcname: ssh
-{% elif variant in ["arch", "centos", "fedora", "rhel", "suse"] %}
+{% elif variant in ["amazon", "arch", "centos", "fedora", "rhel", "suse"] %}
# Default user name + that default users groups (if added/used)
default_user:
+{% if variant == "amazon" %}
+ name: ec2-user
+ lock_passwd: True
+ gecos: EC2 Default User
+{% else %}
name: {{ variant }}
lock_passwd: True
gecos: {{ variant }} Cloud User
+{% endif %}
{% if variant == "suse" %}
groups: [cdrom, users]
{% elif variant == "arch" %}
diff --git a/doc-requirements.txt b/doc-requirements.txt
new file mode 100644
index 00000000..e8977de9
--- /dev/null
+++ b/doc-requirements.txt
@@ -0,0 +1,5 @@
+doc8
+m2r
+sphinx
+sphinx_rtd_theme
+pyyaml
diff --git a/doc/examples/cloud-config-add-apt-repos.txt b/doc/examples/cloud-config-add-apt-repos.txt
index 22ef7612..97722107 100644
--- a/doc/examples/cloud-config-add-apt-repos.txt
+++ b/doc/examples/cloud-config-add-apt-repos.txt
@@ -1,6 +1,10 @@
#cloud-config
-# Add apt repositories
+# Add primary apt repositories
+#
+# To add 3rd party repositories, see cloud-config-apt.txt or the
+# Additional apt configuration and repositories section.
+#
#
# Default: auto select based on cloud metadata
# in ec2, the default is <region>.archive.ubuntu.com
diff --git a/doc/examples/cloud-config-ssh-keys.txt b/doc/examples/cloud-config-ssh-keys.txt
index 235a114f..aad8b683 100644
--- a/doc/examples/cloud-config-ssh-keys.txt
+++ b/doc/examples/cloud-config-ssh-keys.txt
@@ -6,7 +6,7 @@ ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUUk8EEAnnkhXlukKoUPND/RRClWz2s5TCzIkd3Ou5+Cyz71X0XmazM3l5WgeErvtIwQMyT1KjNoMhoJMrJnWqQPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host
- ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies
-# Send pre-generated ssh private keys to the server
+# Send pre-generated SSH private keys to the server
# If these are present, they will be written to /etc/ssh and
# new random keys will not be generated
# in addition to 'rsa' and 'dsa' as shown below, 'ecdsa' is also supported
@@ -42,5 +42,3 @@ ssh_keys:
-----END DSA PRIVATE KEY-----
dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost
-
-
diff --git a/doc/examples/cloud-config-update-apt.txt b/doc/examples/cloud-config-update-apt.txt
index 647241ca..aaa47326 100644
--- a/doc/examples/cloud-config-update-apt.txt
+++ b/doc/examples/cloud-config-update-apt.txt
@@ -5,4 +5,4 @@
#
# Default: false
# Aliases: apt_update
-package_update: false
+package_update: true
diff --git a/doc/man/cloud-id.1 b/doc/man/cloud-id.1
new file mode 100644
index 00000000..98ce130c
--- /dev/null
+++ b/doc/man/cloud-id.1
@@ -0,0 +1,31 @@
+.TH CLOUD-ID 1
+
+.SH NAME
+cloud-id \- Report the canonical cloud-id for this instance
+
+.SH SYNOPSIS
+.BR "cloud-id" " [-h] [-j] [-l] [-i <INSTANCE_DATA>]"
+
+.SH OPTIONS
+.TP
+.B "-h, --help"
+Show help message and exit
+
+.TP
+.B "-j, --json"
+Report all standardized cloud-id information as json
+
+.TP
+.B "-l, --long"
+Report extended cloud-id information as tab-delimited string
+
+.TP
+.BR "-i <data>, --instance-data <data>"
+Path to instance-data.json file. Default is
+/run/cloud-init/instance-data.json
+
+.SH COPYRIGHT
+Copyright (C) 2020 Canonical Ltd. License GPL-3 or Apache-2.0
+
+.SH SEE ALSO
+Full documentation at: <https://cloudinit.readthedocs.io>
diff --git a/doc/man/cloud-init-per.1 b/doc/man/cloud-init-per.1
new file mode 100644
index 00000000..3668232e
--- /dev/null
+++ b/doc/man/cloud-init-per.1
@@ -0,0 +1,45 @@
+.TH CLOUD-INIT-PER 1
+
+.SH NAME
+cloud-init-per \- Run a command with arguments at a specific frequency
+
+.SH SYNOPSIS
+.BR "cloud-init-per" " <frequency> <name> <cmd> [ arg1 [ arg2 [...]]]"
+
+.SH DESCRIPTION
+Run a command with arguments at a specific frequency.
+
+This utility can make it easier to use boothooks or bootcmd on a per
+"once" or "always" basis. For example:
+
+ - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
+
+The cloud-init-per command replaced the cloud-init-run-module command.
+
+.SH OPTIONS
+.TP
+.B "frequency"
+This can be one of the following values:
+
+.BR "once" ":"
+run only once and do not re-run for new instance-id
+
+.BR "instance" ":"
+run only the first boot for a given instance-id
+
+.BR "always" ":"
+run every boot
+
+.TP
+.B "name"
+A name to give the command to run to show up in logs.
+
+.TP
+.B "cmd [ arg1 [ arg2 [...]]]"
+The actual command to run followed by any additional arguments.
+
+.SH COPYRIGHT
+Copyright (C) 2020 Canonical Ltd. License GPL-3 or Apache-2.0
+
+.SH SEE ALSO
+Full documentation at: <https://cloudinit.readthedocs.io>
diff --git a/doc/man/cloud-init.1 b/doc/man/cloud-init.1
new file mode 100644
index 00000000..9b52dc8d
--- /dev/null
+++ b/doc/man/cloud-init.1
@@ -0,0 +1,88 @@
+.TH CLOUD-INIT 1
+
+.SH NAME
+cloud-init \- Cloud instance initialization
+
+.SH SYNOPSIS
+.BR "cloud-init" " [-h] [-d] [-f FILES] [--force] [-v] {init,modules,single,query,dhclient-hook,features,analyze,collect-logs,clean,status}"
+
+.SH DESCRIPTION
+Cloud-init provides a mechanism for cloud instance initialization.
+This is done by identifying the cloud platform that is in use, reading
+provided cloud metadata and optional vendor and user
+data, and then initializing the instance as requested.
+
+Generally, this command is not meant to be run directly by
+the user. However, some subcommands may be useful for development or
+debug of deployments.
+
+.SH OPTIONS
+.TP
+.B "-h, --help"
+Show help message and exit
+
+.TP
+.B "-d, --debug"
+Show additional pre-action logging (default: False)
+
+.TP
+.B "-f <files>, --files <files>"
+Additional YAML configuration files to use
+
+.TP
+.B "--force"
+Force running even if no datasource is found (use at your own risk)
+
+.TP
+.B "-v, --version"
+Show program's version number and exit
+
+.SH SUBCOMMANDS
+Please see the help output for each subcommand for additional details,
+flags, and subcommands.
+
+.TP
+.B "analyze"
+Analyze cloud-init logs and data.
+
+.TP
+.B "collect-logs"
+Collect and tar all cloud-init debug info.
+
+.TP
+.B "clean"
+Remove logs and artifacts so cloud-init can re-run.
+
+.TP
+.B "dhclient-hook"
+Run the dhclient hook to record network info.
+
+.TP
+.B "features"
+List defined features.
+
+.TP
+.B "init"
+Initializes cloud-init and performs initial modules.
+
+.TP
+.B "modules"
+Activates modules using a given configuration key.
+
+.TP
+.B "query"
+Query standardized instance metadata from the command line.
+
+.TP
+.B "single"
+Run a single module.
+
+.TP
+.B "status"
+Report cloud-init status or wait on completion.
+
+.SH COPYRIGHT
+Copyright (C) 2020 Canonical Ltd. License GPL-3 or Apache-2.0
+
+.SH SEE ALSO
+Full documentation at: <https://cloudinit.readthedocs.io>
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 9b274843..86441986 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -28,6 +28,7 @@ copyright = '2019, Canonical Ltd.'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
+ 'm2r',
'sphinx.ext.autodoc',
'sphinx.ext.autosectionlabel',
'sphinx.ext.viewcode',
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index 826e8c48..5d90c131 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -40,6 +40,7 @@ Having trouble? We would like to help!
topics/boot.rst
topics/cli.rst
topics/faq.rst
+ topics/bugs.rst
.. toctree::
:hidden:
@@ -67,6 +68,7 @@ Having trouble? We would like to help!
:caption: Development
topics/hacking.rst
+ topics/security.rst
topics/debugging.rst
topics/logging.rst
topics/dir_layout.rst
diff --git a/doc/rtd/topics/bugs.rst b/doc/rtd/topics/bugs.rst
new file mode 100644
index 00000000..4b60776b
--- /dev/null
+++ b/doc/rtd/topics/bugs.rst
@@ -0,0 +1,108 @@
+.. _reporting_bugs:
+
+Reporting Bugs
+**************
+
+The following documents:
+
+1) How to collect information for reporting bugs
+2) How to file bugs to the upstream cloud-init project or for distro specific
+ packages
+
+Collect Logs
+============
+
+To aid in debugging, please collect the necessary logs. To do so, run the
+`collect-logs` subcommand to produce a tarfile that you can easily upload:
+
+.. code-block:: shell-session
+
+ $ cloud-init collect-logs
+ Wrote /home/ubuntu/cloud-init.tar.gz
+
+If your version of cloud-init does not have the `collect-logs` subcommand,
+then please manually collect the base log files by doing the following:
+
+.. code-block:: shell-session
+
+ $ dmesg > dmesg.txt
+ $ sudo journalctl -o short-precise > journal.txt
+ $ sudo tar -cvf cloud-init.tar dmesg.txt journal.txt /run/cloud-init \
+ /var/log/cloud-init.log /var/log/cloud-init-output.log
+
+Report Upstream Bug
+===================
+
+Bugs for upstream cloud-init are tracked using Launchpad. To file a bug:
+
+1. Collect the necessary debug logs as described above
+2. `Create a Launchpad account`_ or login to your existing account
+3. `Report an upstream cloud-init bug`_
+
+If debug logs are not provided, you will be asked for them before any
+further time is spent debugging. If you are unable to obtain the required
+logs please explain why in the bug.
+
+If your bug is for a specific distro using cloud-init, please first consider
+reporting it with the upstream distro or confirm that it still occurs
+with the latest upstream cloud-init code. See below for details on specific
+distro reporting.
+
+Distro Specific Issues
+======================
+
+For issues specific to your distro please use one of the following distro
+specific reporting mechanisms:
+
+Ubuntu
+------
+
+To report a bug on Ubuntu use the `ubuntu-bug` command on the affected
+system to automatically collect the necessary logs and file a bug on
+Launchpad:
+
+.. code-block:: shell-session
+
+ $ ubuntu-bug cloud-init
+
+If that does not work or is not an option, please collect the logs using the
+commands in the above Collect Logs section and then report the bug on the
+`Ubuntu bug tracker`_. Make sure to attach your collected logs!
+
+Debian
+------
+
+To file a bug against the Debian package of cloud-init please use the
+`Debian bug tracker`_ to file against 'Package: cloud-init'. See the
+`Debian bug reporting wiki`_ page for more details.
+
+Red Hat, CentOS, & Fedora
+-------------------------
+
+To file a bug against the Red Hat or Fedora packages of cloud-init please use
+the `Red Hat bugzilla`_.
+
+SUSE & openSUSE
+---------------
+
+To file a bug against the SUSE packages of cloud-init please use the
+`SUSE bugzilla`_.
+
+Arch
+----
+
+To file a bug against the Arch package of cloud-init please use the
+`Arch Linux Bugtracker`_. See the `Arch bug reporting wiki`_ for more
+details.
+
+.. _Create a Launchpad account: https://help.launchpad.net/YourAccount/NewAccount
+.. _Report an upstream cloud-init bug: https://bugs.launchpad.net/cloud-init/+filebug
+.. _Ubuntu bug tracker: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+filebug
+.. _Debian bug tracker: https://bugs.debian.org/cgi-bin/pkgreport.cgi?pkg=cloud-init;dist=unstable
+.. _Debian bug reporting wiki: https://www.debian.org/Bugs/Reporting
+.. _Red Hat bugzilla: https://bugzilla.redhat.com/
+.. _SUSE bugzilla: https://bugzilla.suse.com/index.cgi
+.. _Arch Linux Bugtracker: https://bugs.archlinux.org/
+.. _Arch bug reporting wiki: https://wiki.archlinux.org/index.php/Bug_reporting_guidelines
+
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 70fbe07d..3d026143 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -30,12 +30,12 @@ The following is a list of documents for each supported datasource:
datasources/aliyun.rst
datasources/altcloud.rst
datasources/azure.rst
- datasources/ec2.rst
- datasources/e24cloud.rst
datasources/cloudsigma.rst
datasources/cloudstack.rst
datasources/configdrive.rst
datasources/digitalocean.rst
+ datasources/e24cloud.rst
+ datasources/ec2.rst
datasources/exoscale.rst
datasources/fallback.rst
datasources/gce.rst
@@ -45,6 +45,7 @@ The following is a list of documents for each supported datasource:
datasources/openstack.rst
datasources/oracle.rst
datasources/ovf.rst
+ datasources/rbxcloud.rst
datasources/smartos.rst
datasources/zstack.rst
@@ -139,7 +140,7 @@ The current interface that a datasource object must provide is the following:
# because cloud-config content would be handled elsewhere
def get_config_obj(self)
- #returns a list of public ssh keys
+ # returns a list of public SSH keys
def get_public_ssh_keys(self)
# translates a device 'short' name into the actual physical device
diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst
index 95b95874..da183226 100644
--- a/doc/rtd/topics/datasources/cloudstack.rst
+++ b/doc/rtd/topics/datasources/cloudstack.rst
@@ -4,7 +4,7 @@ CloudStack
==========
`Apache CloudStack`_ expose user-data, meta-data, user password and account
-sshkey thru the Virtual-Router. The datasource obtains the VR address via
+SSH key through the Virtual-Router. The datasource obtains the VR address via
dhcp lease information given to the instance.
For more details on meta-data and user-data,
refer the `CloudStack Administrator Guide`_.
diff --git a/doc/rtd/topics/datasources/rbxcloud.rst b/doc/rtd/topics/datasources/rbxcloud.rst
index 76e4fe4c..52ec02ff 100644
--- a/doc/rtd/topics/datasources/rbxcloud.rst
+++ b/doc/rtd/topics/datasources/rbxcloud.rst
@@ -1,4 +1,4 @@
-.. _datasource_config_drive:
+.. _datasource_rbx:
Rbx Cloud
=========
diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/topics/examples.rst
index 62b8ee49..81860f85 100644
--- a/doc/rtd/topics/examples.rst
+++ b/doc/rtd/topics/examples.rst
@@ -60,8 +60,8 @@ Setup and run `puppet`_
:language: yaml
:linenos:
-Add apt repositories
-====================
+Add primary apt repositories
+============================
.. literalinclude:: ../../examples/cloud-config-add-apt-repos.txt
:language: yaml
@@ -128,15 +128,15 @@ Reboot/poweroff when finished
:language: yaml
:linenos:
-Configure instances ssh-keys
+Configure instances SSH keys
============================
.. literalinclude:: ../../examples/cloud-config-ssh-keys.txt
:language: yaml
:linenos:
-Additional apt configuration
-============================
+Additional apt configuration and repositories
+=============================================
.. literalinclude:: ../../examples/cloud-config-apt.txt
:language: yaml
diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst
index 16e19c29..98c0cfaa 100644
--- a/doc/rtd/topics/faq.rst
+++ b/doc/rtd/topics/faq.rst
@@ -3,19 +3,214 @@
FAQ
***
-Getting help
-============
+How do I get help?
+==================
Having trouble? We would like to help!
+- First go through this page with answers to common questions
- Use the search bar at the upper left to search these docs
- Ask a question in the ``#cloud-init`` IRC channel on Freenode
- Join and ask questions on the `cloud-init mailing list <https://launchpad.net/~cloud-init>`_
-- Find a bug? `Report bugs on Launchpad <https://bugs.launchpad.net/cloud-init/+filebug>`_
+- Find a bug? Check out the :ref:`reporting_bugs` topic for
+ how to report one
+Where are the logs?
+===================
-Media
-=====
+Cloud-init writes logs to the following locations:
+
+- `/var/log/cloud-init-output.log`: captures the output from each stage of
+ cloud-init when it runs
+- `/var/log/cloud-init.log`: very detailed log with debugging output,
+ detailing each action taken
+- `/run/cloud-init`: contains logs about how cloud-init decided to enable or
+ disable itself, as well as what platforms/datasources were detected. These
+ logs are most useful when trying to determine what cloud-init ran or did not
+ run.
+
+Be aware that each time a system boots, new logs are appended to the files in
+`/var/log`. Therefore, the files may have more than one boot worth of
+information present.
+
+When reviewing these logs look for any errors or Python tracebacks to check
+for any errors.
+
+Where are the configuration files?
+==================================
+
+Cloud-init config is provided in two places:
+
+- `/etc/cloud/cloud.cfg`
+- `/etc/cloud/cloud.cfg.d/*.cfg`
+
+These files can define the modules that run during instance initialization,
+the datasources to evaluate on boot, and other settings.
+
+Where are the data files?
+=========================
+
+Inside the `/var/lib/cloud/` directory there are two important subdirectories:
+
+instance
+--------
+
+The `/var/lib/cloud/instance` directory is a symbolic link that points
+to the most recently used instance-id directory. This folder contains the
+information cloud-init received from datasources, including vendor and user
+data. This can be helpful to review to ensure the correct data was passed.
+
+It also contains the `datasource` file that contains the full information
+about what datasource was identified and used to setup the system.
+
+Finally, the `boot-finished` file is the last thing that cloud-init does.
+
+data
+----
+
+The `/var/lib/cloud/data` directory contains information related to the
+previous boot:
+
+* `instance-id`: id of the instance as discovered by cloud-init. Changing
+ this file has no effect.
+* `result.json`: json file will show both the datasource used to setup
+  the instance, and if any errors occurred
+* `status.json`: json file shows the datasource used and a breakdown
+  of all four modules if any errors occurred and the start and stop times.
+
+What datasource am I using?
+===========================
+
+To correctly setup an instance, cloud-init must correctly identify the
+cloud that it is on. Therefore knowing what datasource is used on an
+instance launch can help aid in debugging.
+
+To find what datasource is getting used run the `cloud-id` command:
+
+.. code-block:: shell-session
+
+ $ cloud-id
+ nocloud
+
+If the cloud-id is not what is expected, then running the `ds-identify`
+script in debug mode and providing that in a bug can help aid in resolving
+any issues:
+
+.. code-block:: shell-session
+
+ $ sudo DEBUG_LEVEL=2 DI_LOG=stderr /usr/lib/cloud-init/ds-identify --force
+
+The force parameter allows the command to be run again since the instance has
+already launched. The other options increase the verbosity of logging and
+put the logs to STDERR.
+
+How can I debug my user data?
+=============================
+
+The two most common issues with user data, in particular cloud-config user
+data, are:
+
+1. Incorrectly formatted YAML
+2. First line does not contain `#cloud-config`
+
+To verify your YAML, we do have a short script called `validate-yaml.py`_
+that can validate your user data offline.
+
+.. _validate-yaml.py: https://github.com/canonical/cloud-init/blob/master/tools/validate-yaml.py
+
+Another option is to run the following on an instance when debugging:
+
+.. code-block:: shell-session
+
+ $ sudo cloud-init query userdata > user-data.yaml
+ $ cloud-init devel schema -c user-data.yaml --annotate
+
+As launching instances in the cloud can cost money and take a bit longer,
+sometimes it is easier to launch instances locally using Multipass or LXD:
+
+Multipass
+---------
+
+`Multipass`_ is a cross-platform tool to launch Ubuntu VMs across Linux,
+Windows, and macOS.
+
+When a user launches a Multipass VM, user data can be passed by adding the
+`--cloud-init` flag and the appropriate YAML file containing user data:
+
+.. code-block:: shell-session
+
+ $ multipass launch bionic --name test-vm --cloud-init userdata.yaml
+
+Multipass will validate the YAML syntax of the cloud-config file before
+attempting to start the VM! A nice addition to help save time when
+experimenting with launching instances with various cloud-configs.
+
+Multipass only supports passing user-data and only as YAML cloud-config
+files. Passing a script, a MIME archive, or any of the other user-data
+formats cloud-init supports will result in an error from the YAML syntax
+validator.
+
+.. _Multipass: https://multipass.run/
+
+LXD
+---
+
+`LXD`_ offers a streamlined user experience for using Linux system
+containers. With LXD, a user can pass:
+
+* user data
+* vendor data
+* metadata
+* network configuration
+
+The following initializes a container with user data:
+
+.. code-block:: shell-session
+
+ $ lxc init ubuntu-daily:bionic test-container
+ $ lxc config set test-container user.user-data - < userdata.yaml
+ $ lxc start test-container
+
+To avoid the extra commands this can also be done at launch:
+
+.. code-block:: shell-session
+
+ $ lxc launch ubuntu-daily:bionic test-container --config=user.user-data="$(cat userdata.yaml)"
+
+Finally, a profile can be set up with the specific data if a user needs to
+launch this multiple times:
+
+.. code-block:: shell-session
+
+ $ lxc profile create dev-user-data
+ $ lxc profile set dev-user-data user.user-data - < cloud-init-config.yaml
+ $ lxc launch ubuntu-daily:bionic test-container -p default -p dev-user-data
+
+The above examples all show how to pass user data. To pass other types of
+configuration data use the config option specified below:
+
++----------------+---------------------+
+| Data | Config Option |
++================+=====================+
+| user data | user.user-data |
++----------------+---------------------+
+| vendor data | user.vendor-data |
++----------------+---------------------+
+| metadata | user.meta-data |
++----------------+---------------------+
+| network config | user.network-config |
++----------------+---------------------+
+
+See the LXD `Instance Configuration`_ docs for more info about configuration
+values or the LXD `Custom Network Configuration`_ document for more about
+custom network config.
+
+.. _LXD: https://linuxcontainers.org/
+.. _Instance Configuration: https://lxd.readthedocs.io/en/latest/instances/
+.. _Custom Network Configuration: https://lxd.readthedocs.io/en/latest/cloud-init/
+
+Where can I learn more?
+========================================
Below are some videos, blog posts, and white papers about cloud-init from a
variety of sources.
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
index f9f4ba6c..2b60bdd3 100644
--- a/doc/rtd/topics/format.rst
+++ b/doc/rtd/topics/format.rst
@@ -113,7 +113,7 @@ These things include:
- apt upgrade should be run on first boot
- a different apt mirror should be used
- additional apt sources should be added
-- certain ssh keys should be imported
+- certain SSH keys should be imported
- *and many more...*
.. note::
@@ -196,6 +196,13 @@ Example
Also this `blog`_ post offers another example for more advanced usage.
+Kernel Command Line
+===================
+
+When using the :ref:`datasource_nocloud` datasource, users can pass user data
+via the kernel command line parameters. See the :ref:`datasource_nocloud`
+datasource documentation for more details.
+
Disabling User-Data
===================
@@ -206,4 +213,5 @@ cloud-init from processing user-data.
.. [#] See your cloud provider for applicable user-data size limitations...
.. _blog: http://foss-boss.blogspot.com/2011/01/advanced-cloud-init-custom-handlers.html
+
.. vi: textwidth=78
diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst
index c17d0a0e..e7dd0d62 100644
--- a/doc/rtd/topics/instancedata.rst
+++ b/doc/rtd/topics/instancedata.rst
@@ -165,7 +165,7 @@ Examples output:
v1.public_ssh_keys
------------------
-A list of ssh keys provided to the instance by the datasource metadata.
+A list of SSH keys provided to the instance by the datasource metadata.
Examples output:
diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst
index 3dcdd3bc..9c9be804 100644
--- a/doc/rtd/topics/modules.rst
+++ b/doc/rtd/topics/modules.rst
@@ -1,8 +1,11 @@
.. _modules:
+
*******
Modules
*******
+.. contents:: Table of Contents
+
.. automodule:: cloudinit.config.cc_apt_configure
.. automodule:: cloudinit.config.cc_apt_pipelining
.. automodule:: cloudinit.config.cc_bootcmd
@@ -46,8 +49,6 @@ Modules
.. automodule:: cloudinit.config.cc_set_hostname
.. automodule:: cloudinit.config.cc_set_passwords
.. automodule:: cloudinit.config.cc_snap
-.. automodule:: cloudinit.config.cc_snappy
-.. automodule:: cloudinit.config.cc_snap_config
.. automodule:: cloudinit.config.cc_spacewalk
.. automodule:: cloudinit.config.cc_ssh
.. automodule:: cloudinit.config.cc_ssh_authkey_fingerprints
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
index 51ced4d1..1520ba9a 100644
--- a/doc/rtd/topics/network-config.rst
+++ b/doc/rtd/topics/network-config.rst
@@ -191,7 +191,7 @@ supplying an updated configuration in cloud-config. ::
system_info:
network:
- renderers: ['netplan', 'eni', 'sysconfig']
+ renderers: ['netplan', 'eni', 'sysconfig', 'freebsd']
Network Configuration Tools
diff --git a/doc/rtd/topics/security.rst b/doc/rtd/topics/security.rst
new file mode 100644
index 00000000..b8386843
--- /dev/null
+++ b/doc/rtd/topics/security.rst
@@ -0,0 +1,5 @@
+.. _security:
+
+.. mdinclude:: ../../../SECURITY.md
+
+.. vi: textwidth=78
diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst
index 3b27f805..aee3d7fc 100644
--- a/doc/rtd/topics/tests.rst
+++ b/doc/rtd/topics/tests.rst
@@ -427,9 +427,9 @@ Azure Cloud
-----------
To run on Azure Cloud platform users login with Service Principal and export
-credentials file. Region is defaulted and can be set in ``tests/cloud_tests/platforms.yaml``.
-The Service Principal credentials are the standard authentication for Azure SDK
-to interact with Azure Services:
+credentials file. Region is defaulted and can be set in
+``tests/cloud_tests/platforms.yaml``. The Service Principal credentials are
+the standard authentication for Azure SDK to interact with Azure Services:
Create Service Principal account or login
@@ -465,7 +465,6 @@ Export credentials
Set region in platforms.yaml
.. code-block:: yaml
- :emphasize-lines: 3
azurecloud:
enabled: true
diff --git a/packages/debian/manpages b/packages/debian/manpages
new file mode 100644
index 00000000..605cfd67
--- /dev/null
+++ b/packages/debian/manpages
@@ -0,0 +1,3 @@
+doc/man/cloud-id.1
+doc/man/cloud-init-per.1
+doc/man/cloud-init.1
diff --git a/setup.py b/setup.py
index fcaf26ff..01a67b95 100755
--- a/setup.py
+++ b/setup.py
@@ -174,6 +174,19 @@ if os.uname()[0] == 'FreeBSD':
USR_LIB_EXEC = "usr/local/lib"
elif os.path.isfile('/etc/redhat-release'):
USR_LIB_EXEC = "usr/libexec"
+elif os.path.isfile('/etc/system-release-cpe'):
+ with open('/etc/system-release-cpe') as f:
+ cpe_data = f.read().rstrip().split(':')
+
+ if cpe_data[1] == "\o":
+ # URI formated CPE
+ inc = 0
+ else:
+ # String formated CPE
+ inc = 1
+ (cpe_vendor, cpe_product, cpe_version) = cpe_data[2+inc:5+inc]
+ if cpe_vendor == "amazon":
+ USR_LIB_EXEC = "usr/libexec"
class MyEggInfo(egg_info):
diff --git a/sysvinit/freebsd/cloudinit b/sysvinit/freebsd/cloudinit
index 33263009..6bf7fa5b 100755
--- a/sysvinit/freebsd/cloudinit
+++ b/sysvinit/freebsd/cloudinit
@@ -1,7 +1,7 @@
#!/bin/sh
# PROVIDE: cloudinit
-# REQUIRE: FILESYSTEMS NETWORKING cloudinitlocal
+# REQUIRE: FILESYSTEMS NETWORKING cloudinitlocal devd
# BEFORE: cloudconfig cloudfinal
. /etc/rc.subr
diff --git a/tests/cloud_tests/testcases/modules/TODO.md b/tests/cloud_tests/testcases/modules/TODO.md
index 0b933b3b..9513cb2d 100644
--- a/tests/cloud_tests/testcases/modules/TODO.md
+++ b/tests/cloud_tests/testcases/modules/TODO.md
@@ -78,11 +78,8 @@ Not applicable to write a test for this as it specifies when something should be
## scripts vendor
Not applicable to write a test for this as it specifies when something should be run.
-## snappy
-2016-11-17: Need test to install snaps from store
-
-## snap-config
-2016-11-17: Need to investigate
+## snap
+2019-12-19: Need to investigate
## spacewalk
diff --git a/tests/cloud_tests/testcases/modules/snappy.py b/tests/cloud_tests/testcases/modules/snappy.py
deleted file mode 100644
index 7d17fc5b..00000000
--- a/tests/cloud_tests/testcases/modules/snappy.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script"""
-from tests.cloud_tests.testcases import base
-
-
-class TestSnappy(base.CloudTestCase):
- """Test snappy module"""
-
- expected_warnings = ('DEPRECATION',)
-
- def test_snappy_version(self):
- """Test snappy version output"""
- out = self.get_data_file('snapd')
- self.assertIn('Status: install ok installed', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/snappy.yaml b/tests/cloud_tests/testcases/modules/snappy.yaml
deleted file mode 100644
index 8ac322ae..00000000
--- a/tests/cloud_tests/testcases/modules/snappy.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Install snappy
-#
-# Aug 17, 2018: Disabled due to requiring a proxy for testing
-# tests do not handle the proxy well at this time.
-enabled: False
-required_features:
- - snap
-cloud_config: |
- #cloud-config
- snappy:
- system_snappy: auto
-collect_scripts:
- snapd: |
- #!/bin/bash
- dpkg -s snapd
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py
index e7329d48..02935447 100644
--- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py
+++ b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py
@@ -11,6 +11,6 @@ class TestSshKeyFingerprintsDisable(base.CloudTestCase):
"""Verify disabled."""
out = self.get_data_file('cloud-init.log')
self.assertIn('Skipping module named ssh-authkey-fingerprints, '
- 'logging of ssh fingerprints disabled', out)
+ 'logging of SSH fingerprints disabled', out)
# vi: ts=4 expandtab
diff --git a/tests/data/netinfo/freebsd-ifconfig-output b/tests/data/netinfo/freebsd-ifconfig-output
index 3de15a5a..f64c2f60 100644
--- a/tests/data/netinfo/freebsd-ifconfig-output
+++ b/tests/data/netinfo/freebsd-ifconfig-output
@@ -1,17 +1,39 @@
vtnet0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
- options=6c07bb<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,JUMBO_MTU,VLAN_HWCSUM,TSO4,TSO6,LRO,VLAN_HWTSO,LINKSTATE,RXCSUM_IPV6,TXCSUM_IPV6>
- ether fa:16:3e:14:1f:99
- hwaddr fa:16:3e:14:1f:99
- inet 10.1.80.61 netmask 0xfffff000 broadcast 10.1.95.255
- nd6 options=29<PERFORMNUD,IFDISABLED,AUTO_LINKLOCAL>
- media: Ethernet 10Gbase-T <full-duplex>
- status: active
-pflog0: flags=0<> metric 0 mtu 33160
-pfsync0: flags=0<> metric 0 mtu 1500
- syncpeer: 0.0.0.0 maxupd: 128 defer: off
+ options=6c07bb<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,JUMBO_MTU,VLAN_HWCSUM,TSO4,TSO6,LRO,VLAN_HWTSO,LINKSTATE,RXCSUM_IPV6,TXCSUM_IPV6>
+ ether 52:54:00:50:b7:0d
+re0.33: flags=8943<UP,BROADCAST,RUNNING,PROMISC,SIMPLEX,MULTICAST> metric 0 mtu 1500
+ options=80003<RXCSUM,TXCSUM,LINKSTATE>
+ ether 80:00:73:63:5c:48
+ groups: vlan
+ vlan: 33 vlanpcp: 0 parent interface: re0
+ media: Ethernet autoselect (1000baseT <full-duplex,master>)
+ status: active
+ nd6 options=21<PERFORMNUD,AUTO_LINKLOCAL>
+bridge0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
+ ether 02:14:39:0e:25:00
+ inet 192.168.1.1 netmask 0xffffff00 broadcast 192.168.1.255
+ id 00:00:00:00:00:00 priority 32768 hellotime 2 fwddelay 15
+ maxage 20 holdcnt 6 proto rstp maxaddr 2000 timeout 1200
+ root id 00:00:00:00:00:00 priority 32768 ifcost 0 port 0
+ member: vnet0:11 flags=143<LEARNING,DISCOVER,AUTOEDGE,AUTOPTP>
+ ifmaxaddr 0 port 5 priority 128 path cost 2000
+ member: vnet0:1 flags=143<LEARNING,DISCOVER,AUTOEDGE,AUTOPTP>
+ ifmaxaddr 0 port 4 priority 128 path cost 2000
+ groups: bridge
+ nd6 options=9<PERFORMNUD,IFDISABLED>
+vnet0:11: flags=8943<UP,BROADCAST,RUNNING,PROMISC,SIMPLEX,MULTICAST> metric 0 mtu 1500
+ description: 'associated with jail: webirc'
+ options=8<VLAN_MTU>
+ ether 02:ff:60:8c:f3:72
+ hwaddr 02:2b:bb:64:3f:0a
+ inet6 fe80::2b:bbff:fe64:3f0a%vnet0:11 prefixlen 64 tentative scopeid 0x5
+ groups: epair
+ media: Ethernet 10Gbase-T (10Gbase-T <full-duplex>)
+ status: active
+ nd6 options=29<PERFORMNUD,IFDISABLED,AUTO_LINKLOCAL>
lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> metric 0 mtu 16384
- options=600003<RXCSUM,TXCSUM,RXCSUM_IPV6,TXCSUM_IPV6>
- inet6 ::1 prefixlen 128
- inet6 fe80::1%lo0 prefixlen 64 scopeid 0x4
- inet 127.0.0.1 netmask 0xff000000
- nd6 options=21<PERFORMNUD,AUTO_LINKLOCAL>
+ options=600003<RXCSUM,TXCSUM,RXCSUM_IPV6,TXCSUM_IPV6>
+ inet6 ::1 prefixlen 128
+ inet6 fe80::1%lo0 prefixlen 64 scopeid 0x2
+ inet 127.0.0.1 netmask 0xff000000
+ nd6 options=21<PERFORMNUD,AUTO_LINKLOCAL>
diff --git a/tests/data/netinfo/freebsd-netdev-formatted-output b/tests/data/netinfo/freebsd-netdev-formatted-output
index a9d2ac14..a0d937b3 100644
--- a/tests/data/netinfo/freebsd-netdev-formatted-output
+++ b/tests/data/netinfo/freebsd-netdev-formatted-output
@@ -1,11 +1,12 @@
-+++++++++++++++++++++++++++++++Net device info+++++++++++++++++++++++++++++++
-+---------+-------+----------------+------------+-------+-------------------+
-| Device | Up | Address | Mask | Scope | Hw-Address |
-+---------+-------+----------------+------------+-------+-------------------+
-| lo0 | True | 127.0.0.1 | 0xff000000 | . | . |
-| lo0 | True | ::1/128 | . | . | . |
-| lo0 | True | fe80::1%lo0/64 | . | 0x4 | . |
-| pflog0 | False | . | . | . | . |
-| pfsync0 | False | . | . | . | . |
-| vtnet0 | True | 10.1.80.61 | 0xfffff000 | . | fa:16:3e:14:1f:99 |
-+---------+-------+----------------+------------+-------+-------------------+
++++++++++++++++++++++++++++++++++++++++++Net device info++++++++++++++++++++++++++++++++++++++++++
++----------+------+-------------------------------------+------------+-------+-------------------+
+| Device | Up | Address | Mask | Scope | Hw-Address |
++----------+------+-------------------------------------+------------+-------+-------------------+
+| bridge0 | True | 192.168.1.1 | 0xffffff00 | . | 02:14:39:0e:25:00 |
+| lo0 | True | 127.0.0.1 | 0xff000000 | . | . |
+| lo0 | True | ::1/128 | . | . | . |
+| lo0 | True | fe80::1%lo0/64 | . | 0x2 | . |
+| re0.33 | True | . | . | . | 80:00:73:63:5c:48 |
+| vnet0:11 | True | fe80::2b:bbff:fe64:3f0a%vnet0:11/64 | . | 0x5 | 02:2b:bb:64:3f:0a |
+| vtnet0 | True | . | . | . | 52:54:00:50:b7:0d |
++----------+------+-------------------------------------+------------+-------+-------------------+
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 59e351de..a809fd87 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -477,7 +477,7 @@ scbus-1 on xpt0 bus 0
'public-keys': [],
})
- self.instance_id = 'test-instance-id'
+ self.instance_id = 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8'
def _dmi_mocks(key):
if key == 'system-uuid':
@@ -645,7 +645,7 @@ scbus-1 on xpt0 bus 0
'azure_data': {
'configurationsettype': 'LinuxProvisioningConfiguration'},
'imds': NETWORK_METADATA,
- 'instance-id': 'test-instance-id',
+ 'instance-id': 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8',
'local-hostname': u'myhost',
'random_seed': 'wild'}
@@ -1091,6 +1091,24 @@ scbus-1 on xpt0 bus 0
self.assertTrue(ret)
self.assertEqual('value', dsrc.metadata['test'])
+ def test_instance_id_endianness(self):
+ """Return the previous iid when dmi uuid is the byteswapped iid."""
+ ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ # byte-swapped previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
+ '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8')
+ ds.get_data()
+ self.assertEqual(
+ '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8', ds.metadata['instance-id'])
+ # not byte-swapped previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
+ '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8')
+ ds.get_data()
+ self.assertEqual(
+ 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', ds.metadata['instance-id'])
+
def test_instance_id_from_dmidecode_used(self):
ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
ds.get_data()
@@ -1292,7 +1310,7 @@ class TestAzureBounce(CiTestCase):
def _dmi_mocks(key):
if key == 'system-uuid':
- return 'test-instance-id'
+ return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8'
elif key == 'chassis-asset-tag':
return '7783-7084-3265-9085-8269-3286-77'
raise RuntimeError('should not get here')
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index bd17f636..007df09f 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -170,6 +170,25 @@ class TestGoalStateParsing(CiTestCase):
goal_state = self._get_goal_state(instance_id=instance_id)
self.assertEqual(instance_id, goal_state.instance_id)
+ def test_instance_id_byte_swap(self):
+ """Return true when previous_iid is byteswapped current_iid"""
+ previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+ current_iid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8"
+ self.assertTrue(
+ azure_helper.is_byte_swapped(previous_iid, current_iid))
+
+ def test_instance_id_no_byte_swap_same_instance_id(self):
+ previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+ current_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+ self.assertFalse(
+ azure_helper.is_byte_swapped(previous_iid, current_iid))
+
+ def test_instance_id_no_byte_swap_diff_instance_id(self):
+ previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+ current_iid = "G0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+ self.assertFalse(
+ azure_helper.is_byte_swapped(previous_iid, current_iid))
+
def test_certificates_xml_parsed_and_fetched_correctly(self):
http_client = mock.MagicMock()
certificates_url = 'TestCertificatesUrl'
diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py
index 61a7a762..4ab5d471 100644
--- a/tests/unittests/test_datasource/test_common.py
+++ b/tests/unittests/test_datasource/test_common.py
@@ -4,6 +4,7 @@ from cloudinit import settings
from cloudinit import sources
from cloudinit import type_utils
from cloudinit.sources import (
+ DataSource,
DataSourceAliYun as AliYun,
DataSourceAltCloud as AltCloud,
DataSourceAzure as Azure,
@@ -23,6 +24,7 @@ from cloudinit.sources import (
DataSourceOpenStack as OpenStack,
DataSourceOracle as Oracle,
DataSourceOVF as OVF,
+ DataSourceRbxCloud as RbxCloud,
DataSourceScaleway as Scaleway,
DataSourceSmartOS as SmartOS,
)
@@ -44,6 +46,7 @@ DEFAULT_LOCAL = [
SmartOS.DataSourceSmartOS,
Ec2.DataSourceEc2Local,
OpenStack.DataSourceOpenStackLocal,
+ RbxCloud.DataSourceRbxCloud,
Scaleway.DataSourceScaleway,
]
@@ -86,7 +89,6 @@ class ExpectedDataSources(test_helpers.TestCase):
class TestDataSourceInvariants(test_helpers.TestCase):
-
def test_data_sources_have_valid_network_config_sources(self):
for ds in DEFAULT_LOCAL + DEFAULT_NETWORK:
for cfg_src in ds.network_config_sources:
@@ -95,5 +97,14 @@ class TestDataSourceInvariants(test_helpers.TestCase):
self.assertTrue(hasattr(sources.NetworkConfigSource, cfg_src),
fail_msg)
+ def test_expected_dsname_defined(self):
+ for ds in DEFAULT_LOCAL + DEFAULT_NETWORK:
+ fail_msg = (
+ '{} has an invalid / missing dsname property: {}'.format(
+ str(ds), str(ds.dsname)
+ )
+ )
+ self.assertNotEqual(ds.dsname, DataSource.dsname, fail_msg)
+ self.assertIsNotNone(ds.dsname)
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py
index 40624952..ef11784d 100644
--- a/tests/unittests/test_distros/test_create_users.py
+++ b/tests/unittests/test_distros/test_create_users.py
@@ -206,7 +206,7 @@ class TestCreateUser(CiTestCase):
user = 'foouser'
self.dist.create_user(user, ssh_redirect_user='someuser')
self.assertIn(
- 'WARNING: Unable to disable ssh logins for foouser given '
+ 'WARNING: Unable to disable SSH logins for foouser given '
'ssh_redirect_user: someuser. No cloud public-keys present.\n',
self.logs.getvalue())
m_setup_user_keys.assert_not_called()
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index 67209955..aeaadaa0 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -1,5 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import copy
import os
from six import StringIO
from textwrap import dedent
@@ -14,7 +15,7 @@ from cloudinit.distros.parsers.sys_conf import SysConf
from cloudinit import helpers
from cloudinit import settings
from cloudinit.tests.helpers import (
- FilesystemMockingTestCase, dir2dict, populate_dir)
+ FilesystemMockingTestCase, dir2dict)
from cloudinit import util
@@ -109,13 +110,31 @@ auto eth1
iface eth1 inet dhcp
"""
+V1_NET_CFG_IPV6_OUTPUT = """\
+# This file is generated from information provided by the datasource. Changes
+# to it will not persist across an instance reboot. To disable cloud-init's
+# network configuration capabilities, write a file
+# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
+# network: {config: disabled}
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet6 static
+ address 2607:f0d0:1002:0011::2/64
+ gateway 2607:f0d0:1002:0011::1
+
+auto eth1
+iface eth1 inet dhcp
+"""
+
V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0',
'subnets': [{'address':
'2607:f0d0:1002:0011::2',
'gateway':
'2607:f0d0:1002:0011::1',
'netmask': '64',
- 'type': 'static'}],
+ 'type': 'static6'}],
'type': 'physical'},
{'name': 'eth1',
'subnets': [{'control': 'auto',
@@ -141,6 +160,23 @@ network:
dhcp4: true
"""
+V1_TO_V2_NET_CFG_IPV6_OUTPUT = """\
+# This file is generated from information provided by the datasource. Changes
+# to it will not persist across an instance reboot. To disable cloud-init's
+# network configuration capabilities, write a file
+# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
+# network: {config: disabled}
+network:
+ version: 2
+ ethernets:
+ eth0:
+ addresses:
+ - 2607:f0d0:1002:0011::2/64
+ gateway6: 2607:f0d0:1002:0011::1
+ eth1:
+ dhcp4: true
+"""
+
V2_NET_CFG = {
'ethernets': {
'eth7': {
@@ -213,128 +249,95 @@ class TestNetCfgDistroBase(FilesystemMockingTestCase):
self.assertEqual(v, b2[k])
-class TestNetCfgDistroFreebsd(TestNetCfgDistroBase):
+class TestNetCfgDistroFreeBSD(TestNetCfgDistroBase):
- frbsd_ifout = """\
-hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
- options=51b<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,TSO4,LRO>
- ether 00:15:5d:4c:73:00
- inet6 fe80::215:5dff:fe4c:7300%hn0 prefixlen 64 scopeid 0x2
- inet 10.156.76.127 netmask 0xfffffc00 broadcast 10.156.79.255
- nd6 options=23<PERFORMNUD,ACCEPT_RTADV,AUTO_LINKLOCAL>
- media: Ethernet autoselect (10Gbase-T <full-duplex>)
- status: active
+ def setUp(self):
+ super(TestNetCfgDistroFreeBSD, self).setUp()
+ self.distro = self._get_distro('freebsd', renderers=['freebsd'])
+
+ def _apply_and_verify_freebsd(self, apply_fn, config, expected_cfgs=None,
+ bringup=False):
+ if not expected_cfgs:
+ raise ValueError('expected_cfg must not be None')
+
+ tmpd = None
+ with mock.patch('cloudinit.net.freebsd.available') as m_avail:
+ m_avail.return_value = True
+ with self.reRooted(tmpd) as tmpd:
+ util.ensure_dir('/etc')
+ util.ensure_file('/etc/rc.conf')
+ util.ensure_file('/etc/resolv.conf')
+ apply_fn(config, bringup)
+
+ results = dir2dict(tmpd)
+ for cfgpath, expected in expected_cfgs.items():
+ print("----------")
+ print(expected)
+ print("^^^^ expected | rendered VVVVVVV")
+ print(results[cfgpath])
+ print("----------")
+ self.assertEqual(
+ set(expected.split('\n')),
+ set(results[cfgpath].split('\n')))
+ self.assertEqual(0o644, get_mode(cfgpath, tmpd))
+
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ def test_apply_network_config_freebsd_standard(self, ifaces_mac):
+ ifaces_mac.return_value = {
+ '00:15:5d:4c:73:00': 'eth0',
+ }
+ rc_conf_expected = """\
+defaultrouter=192.168.1.254
+ifconfig_eth0='192.168.1.5 netmask 255.255.255.0'
+ifconfig_eth1=DHCP
"""
- @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_list')
- @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_ifname_out')
- def test_get_ip_nic_freebsd(self, ifname_out, iflist):
- frbsd_distro = self._get_distro('freebsd')
- iflist.return_value = "lo0 hn0"
- ifname_out.return_value = self.frbsd_ifout
- res = frbsd_distro.get_ipv4()
- self.assertEqual(res, ['lo0', 'hn0'])
- res = frbsd_distro.get_ipv6()
- self.assertEqual(res, [])
-
- @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_ether')
- @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_ifname_out')
- @mock.patch('cloudinit.distros.freebsd.Distro.get_interface_mac')
- def test_generate_fallback_config_freebsd(self, mac, ifname_out, if_ether):
- frbsd_distro = self._get_distro('freebsd')
-
- if_ether.return_value = 'hn0'
- ifname_out.return_value = self.frbsd_ifout
- mac.return_value = '00:15:5d:4c:73:00'
- res = frbsd_distro.generate_fallback_config()
- self.assertIsNotNone(res)
-
- def test_simple_write_freebsd(self):
- fbsd_distro = self._get_distro('freebsd')
-
- rc_conf = '/etc/rc.conf'
- read_bufs = {
- rc_conf: 'initial-rc-conf-not-validated',
- '/etc/resolv.conf': 'initial-resolv-conf-not-validated',
+ expected_cfgs = {
+ '/etc/rc.conf': rc_conf_expected,
+ '/etc/resolv.conf': ''
}
+ self._apply_and_verify_freebsd(self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy())
- tmpd = self.tmp_dir()
- populate_dir(tmpd, read_bufs)
- with self.reRooted(tmpd):
- with mock.patch("cloudinit.distros.freebsd.util.subp",
- return_value=('vtnet0', '')):
- fbsd_distro.apply_network(BASE_NET_CFG, False)
- results = dir2dict(tmpd)
-
- self.assertIn(rc_conf, results)
- self.assertCfgEquals(
- dedent('''\
- ifconfig_vtnet0="192.168.1.5 netmask 255.255.255.0"
- ifconfig_vtnet1="DHCP"
- defaultrouter="192.168.1.254"
- '''), results[rc_conf])
- self.assertEqual(0o644, get_mode(rc_conf, tmpd))
-
- def test_simple_write_freebsd_from_v2eni(self):
- fbsd_distro = self._get_distro('freebsd')
-
- rc_conf = '/etc/rc.conf'
- read_bufs = {
- rc_conf: 'initial-rc-conf-not-validated',
- '/etc/resolv.conf': 'initial-resolv-conf-not-validated',
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ def test_apply_network_config_freebsd_ifrename(self, ifaces_mac):
+ ifaces_mac.return_value = {
+ '00:15:5d:4c:73:00': 'vtnet0',
}
+ rc_conf_expected = """\
+ifconfig_vtnet0_name=eth0
+defaultrouter=192.168.1.254
+ifconfig_eth0='192.168.1.5 netmask 255.255.255.0'
+ifconfig_eth1=DHCP
+"""
+
+ V1_NET_CFG_RENAME = copy.deepcopy(V1_NET_CFG)
+ V1_NET_CFG_RENAME['config'][0]['mac_address'] = '00:15:5d:4c:73:00'
- tmpd = self.tmp_dir()
- populate_dir(tmpd, read_bufs)
- with self.reRooted(tmpd):
- with mock.patch("cloudinit.distros.freebsd.util.subp",
- return_value=('vtnet0', '')):
- fbsd_distro.apply_network(BASE_NET_CFG_FROM_V2, False)
- results = dir2dict(tmpd)
-
- self.assertIn(rc_conf, results)
- self.assertCfgEquals(
- dedent('''\
- ifconfig_vtnet0="192.168.1.5 netmask 255.255.255.0"
- ifconfig_vtnet1="DHCP"
- defaultrouter="192.168.1.254"
- '''), results[rc_conf])
- self.assertEqual(0o644, get_mode(rc_conf, tmpd))
-
- def test_apply_network_config_fallback_freebsd(self):
- fbsd_distro = self._get_distro('freebsd')
-
- # a weak attempt to verify that we don't have an implementation
- # of _write_network_config or apply_network_config in fbsd now,
- # which would make this test not actually test the fallback.
- self.assertRaises(
- NotImplementedError, fbsd_distro._write_network_config,
- BASE_NET_CFG)
-
- # now run
- mynetcfg = {
- 'config': [{"type": "physical", "name": "eth0",
- "mac_address": "c0:d6:9f:2c:e8:80",
- "subnets": [{"type": "dhcp"}]}],
- 'version': 1}
-
- rc_conf = '/etc/rc.conf'
- read_bufs = {
- rc_conf: 'initial-rc-conf-not-validated',
- '/etc/resolv.conf': 'initial-resolv-conf-not-validated',
+ expected_cfgs = {
+ '/etc/rc.conf': rc_conf_expected,
+ '/etc/resolv.conf': ''
}
+ self._apply_and_verify_freebsd(self.distro.apply_network_config,
+ V1_NET_CFG_RENAME,
+ expected_cfgs=expected_cfgs.copy())
- tmpd = self.tmp_dir()
- populate_dir(tmpd, read_bufs)
- with self.reRooted(tmpd):
- with mock.patch("cloudinit.distros.freebsd.util.subp",
- return_value=('vtnet0', '')):
- fbsd_distro.apply_network_config(mynetcfg, bring_up=False)
- results = dir2dict(tmpd)
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ def test_apply_network_config_freebsd_nameserver(self, ifaces_mac):
+ ifaces_mac.return_value = {
+ '00:15:5d:4c:73:00': 'eth0',
+ }
- self.assertIn(rc_conf, results)
- self.assertCfgEquals('ifconfig_vtnet0="DHCP"', results[rc_conf])
- self.assertEqual(0o644, get_mode(rc_conf, tmpd))
+ V1_NET_CFG_DNS = copy.deepcopy(V1_NET_CFG)
+ ns = ['1.2.3.4']
+ V1_NET_CFG_DNS['config'][0]['subnets'][0]['dns_nameservers'] = ns
+ expected_cfgs = {
+ '/etc/resolv.conf': 'nameserver 1.2.3.4\n'
+ }
+ self._apply_and_verify_freebsd(self.distro.apply_network_config,
+ V1_NET_CFG_DNS,
+ expected_cfgs=expected_cfgs.copy())
class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase):
@@ -376,6 +379,14 @@ class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase):
V1_NET_CFG,
expected_cfgs=expected_cfgs.copy())
+ def test_apply_network_config_ipv6_ub(self):
+ expected_cfgs = {
+ self.eni_path(): V1_NET_CFG_IPV6_OUTPUT
+ }
+ self._apply_and_verify_eni(self.distro.apply_network_config,
+ V1_NET_CFG_IPV6,
+ expected_cfgs=expected_cfgs.copy())
+
class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase):
def setUp(self):
@@ -419,6 +430,16 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase):
V1_NET_CFG,
expected_cfgs=expected_cfgs.copy())
+ def test_apply_network_config_v1_ipv6_to_netplan_ub(self):
+ expected_cfgs = {
+ self.netplan_path(): V1_TO_V2_NET_CFG_IPV6_OUTPUT,
+ }
+
+ # ub_distro.apply_network_config(V1_NET_CFG_IPV6, False)
+ self._apply_and_verify_netplan(self.distro.apply_network_config,
+ V1_NET_CFG_IPV6,
+ expected_cfgs=expected_cfgs.copy())
+
def test_apply_network_config_v2_passthrough_ub(self):
expected_cfgs = {
self.netplan_path(): V2_TO_V2_NET_CFG_OUTPUT,
@@ -694,10 +715,11 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase):
"""),
}
- self._apply_and_verify(self.distro.apply_network_config,
- V1_NET_CFG,
- expected_cfgs=expected_cfgs.copy(),
- with_netplan=True)
+ with mock.patch('cloudinit.util.is_FreeBSD', return_value=False):
+ self._apply_and_verify(self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ with_netplan=True)
def get_mode(path, target=None):
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 12c6ae36..36d7fbbf 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -140,7 +140,8 @@ class DsIdentifyBase(CiTestCase):
{'name': 'blkid', 'out': BLKID_EFI_ROOT},
{'name': 'ovf_vmware_transport_guestinfo',
'out': 'No value found', 'ret': 1},
-
+ {'name': 'dmi_decode', 'ret': 1,
+ 'err': 'No dmidecode program. ERROR.'},
]
written = [d['name'] for d in mocks]
@@ -625,6 +626,21 @@ class TestDsIdentify(DsIdentifyBase):
self._test_ds_not_found('Ec2-E24Cloud-negative')
+class TestBSDNoSys(DsIdentifyBase):
+ """Test *BSD code paths
+
+ FreeBSD doesn't have /sys so we use dmidecode(8) here
+ It also doesn't have systemd-detect-virt(8), so we use sysctl(8) to query
+ kern.vm_guest, and optionally map it"""
+
+ def test_dmi_decode(self):
+ """Test that dmidecode(8) works on systems which don't have /sys
+
+ This will be used on *BSD systems.
+ """
+ self._test_ds_found('Hetzner-dmidecode')
+
+
class TestIsIBMProvisioning(DsIdentifyBase):
"""Test the is_ibm_provisioning method in ds-identify."""
@@ -923,6 +939,12 @@ VALID_CFG = {
'ds': 'Hetzner',
'files': {P_SYS_VENDOR: 'Hetzner\n'},
},
+ 'Hetzner-dmidecode': {
+ 'ds': 'Hetzner',
+ 'mocks': [
+ {'name': 'dmi_decode', 'ret': 0, 'RET': 'Hetzner'}
+ ],
+ },
'IBMCloud-metadata': {
'ds': 'IBMCloud',
'mocks': [
diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py
index a3e46351..1f39ebe7 100644
--- a/tests/unittests/test_handler/test_handler_growpart.py
+++ b/tests/unittests/test_handler/test_handler_growpart.py
@@ -52,6 +52,18 @@ growpart disk partition
Resize partition 1 on /dev/sda
"""
+HELP_GPART = """
+usage: gpart add -t type [-a alignment] [-b start] <SNIP> geom
+ gpart backup geom
+ gpart bootcode [-b bootcode] [-p partcode -i index] [-f flags] geom
+<SNIP>
+ gpart resize -i index [-a alignment] [-s size] [-f flags] geom
+ gpart restore [-lF] [-f flags] provider [...]
+ gpart recover [-f flags] geom
+ gpart help
+<SNIP>
+"""
+
class TestDisabled(unittest.TestCase):
def setUp(self):
@@ -97,8 +109,9 @@ class TestConfig(TestCase):
self.handle(self.name, config, self.cloud_init, self.log,
self.args)
- mockobj.assert_called_once_with(
- ['growpart', '--help'], env={'LANG': 'C'})
+ mockobj.assert_has_calls([
+ mock.call(['growpart', '--help'], env={'LANG': 'C'}),
+ mock.call(['gpart', 'help'], env={'LANG': 'C'}, rcs=[0, 1])])
@mock.patch.dict("os.environ", clear=True)
def test_no_resizers_mode_growpart_is_exception(self):
@@ -124,6 +137,18 @@ class TestConfig(TestCase):
mockobj.assert_called_once_with(
['growpart', '--help'], env={'LANG': 'C'})
+ @mock.patch.dict("os.environ", clear=True)
+ def test_mode_auto_falls_back_to_gpart(self):
+ with mock.patch.object(
+ util, 'subp',
+ return_value=("", HELP_GPART)) as mockobj:
+ ret = cc_growpart.resizer_factory(mode="auto")
+ self.assertIsInstance(ret, cc_growpart.ResizeGpart)
+
+ mockobj.assert_has_calls([
+ mock.call(['growpart', '--help'], env={'LANG': 'C'}),
+ mock.call(['gpart', 'help'], env={'LANG': 'C'}, rcs=[0, 1])])
+
def test_handle_with_no_growpart_entry(self):
# if no 'growpart' entry in config, then mode=auto should be used
diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py
deleted file mode 100644
index 76b79c29..00000000
--- a/tests/unittests/test_handler/test_handler_snappy.py
+++ /dev/null
@@ -1,601 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config.cc_snappy import (
- makeop, get_package_ops, render_snap_op)
-from cloudinit.config.cc_snap_config import (
- add_assertions, add_snap_user, ASSERTIONS_FILE)
-from cloudinit import (distros, helpers, cloud, util)
-from cloudinit.config.cc_snap_config import handle as snap_handle
-from cloudinit.sources import DataSourceNone
-from cloudinit.tests.helpers import FilesystemMockingTestCase, mock
-
-from cloudinit.tests import helpers as t_help
-
-import logging
-import os
-import shutil
-import tempfile
-import textwrap
-import yaml
-
-LOG = logging.getLogger(__name__)
-ALLOWED = (dict, list, int, str)
-
-
-class TestInstallPackages(t_help.TestCase):
- def setUp(self):
- super(TestInstallPackages, self).setUp()
- self.unapply = []
-
- # by default 'which' has nothing in its path
- self.apply_patches([(util, 'subp', self._subp)])
- self.subp_called = []
- self.snapcmds = []
- self.tmp = tempfile.mkdtemp(prefix="TestInstallPackages")
-
- def tearDown(self):
- apply_patches([i for i in reversed(self.unapply)])
- shutil.rmtree(self.tmp)
-
- def apply_patches(self, patches):
- ret = apply_patches(patches)
- self.unapply += ret
-
- def populate_tmp(self, files):
- return t_help.populate_dir(self.tmp, files)
-
- def _subp(self, *args, **kwargs):
- # supports subp calling with cmd as args or kwargs
- if 'args' not in kwargs:
- kwargs['args'] = args[0]
- self.subp_called.append(kwargs)
- args = kwargs['args']
- # here we basically parse the snappy command invoked
- # and append to snapcmds a list of (mode, pkg, config)
- if args[0:2] == ['snappy', 'config']:
- if args[3] == "-":
- config = kwargs.get('data', '')
- else:
- with open(args[3], "rb") as fp:
- config = yaml.safe_load(fp.read())
- self.snapcmds.append(['config', args[2], config])
- elif args[0:2] == ['snappy', 'install']:
- config = None
- pkg = None
- for arg in args[2:]:
- if arg.startswith("-"):
- continue
- if not pkg:
- pkg = arg
- elif not config:
- cfgfile = arg
- if cfgfile == "-":
- config = kwargs.get('data', '')
- elif cfgfile:
- with open(cfgfile, "rb") as fp:
- config = yaml.safe_load(fp.read())
- self.snapcmds.append(['install', pkg, config])
-
- def test_package_ops_1(self):
- ret = get_package_ops(
- packages=['pkg1', 'pkg2', 'pkg3'],
- configs={'pkg2': b'mycfg2'}, installed=[])
- self.assertEqual(
- ret, [makeop('install', 'pkg1', None, None),
- makeop('install', 'pkg2', b'mycfg2', None),
- makeop('install', 'pkg3', None, None)])
-
- def test_package_ops_config_only(self):
- ret = get_package_ops(
- packages=None,
- configs={'pkg2': b'mycfg2'}, installed=['pkg1', 'pkg2'])
- self.assertEqual(
- ret, [makeop('config', 'pkg2', b'mycfg2')])
-
- def test_package_ops_install_and_config(self):
- ret = get_package_ops(
- packages=['pkg3', 'pkg2'],
- configs={'pkg2': b'mycfg2', 'xinstalled': b'xcfg'},
- installed=['xinstalled'])
- self.assertEqual(
- ret, [makeop('install', 'pkg3'),
- makeop('install', 'pkg2', b'mycfg2'),
- makeop('config', 'xinstalled', b'xcfg')])
-
- def test_package_ops_install_long_config_short(self):
- # a package can be installed by full name, but have config by short
- cfg = {'k1': 'k2'}
- ret = get_package_ops(
- packages=['config-example.canonical'],
- configs={'config-example': cfg}, installed=[])
- self.assertEqual(
- ret, [makeop('install', 'config-example.canonical', cfg)])
-
- def test_package_ops_with_file(self):
- self.populate_tmp(
- {"snapf1.snap": b"foo1", "snapf1.config": b"snapf1cfg",
- "snapf2.snap": b"foo2", "foo.bar": "ignored"})
- ret = get_package_ops(
- packages=['pkg1'], configs={}, installed=[], fspath=self.tmp)
- self.assertEqual(
- ret,
- [makeop_tmpd(self.tmp, 'install', 'snapf1', path="snapf1.snap",
- cfgfile="snapf1.config"),
- makeop_tmpd(self.tmp, 'install', 'snapf2', path="snapf2.snap"),
- makeop('install', 'pkg1')])
-
- def test_package_ops_common_filename(self):
- # fish package name from filename
- # package names likely look like: pkgname.namespace_version_arch.snap
-
- # find filenames
- self.populate_tmp(
- {"pkg-ws.smoser_0.3.4_all.snap": "pkg-ws-snapdata",
- "pkg-ws.config": "pkg-ws-config",
- "pkg1.smoser_1.2.3_all.snap": "pkg1.snapdata",
- "pkg1.smoser.config": "pkg1.smoser.config-data",
- "pkg1.config": "pkg1.config-data",
- "pkg2.smoser_0.0_amd64.snap": "pkg2-snapdata",
- "pkg2.smoser_0.0_amd64.config": "pkg2.config"})
-
- ret = get_package_ops(
- packages=[], configs={}, installed=[], fspath=self.tmp)
- self.assertEqual(
- ret,
- [makeop_tmpd(self.tmp, 'install', 'pkg-ws.smoser',
- path="pkg-ws.smoser_0.3.4_all.snap",
- cfgfile="pkg-ws.config"),
- makeop_tmpd(self.tmp, 'install', 'pkg1.smoser',
- path="pkg1.smoser_1.2.3_all.snap",
- cfgfile="pkg1.smoser.config"),
- makeop_tmpd(self.tmp, 'install', 'pkg2.smoser',
- path="pkg2.smoser_0.0_amd64.snap",
- cfgfile="pkg2.smoser_0.0_amd64.config"),
- ])
-
- def test_package_ops_config_overrides_file(self):
- # config data overrides local file .config
- self.populate_tmp(
- {"snapf1.snap": b"foo1", "snapf1.config": b"snapf1cfg"})
- ret = get_package_ops(
- packages=[], configs={'snapf1': 'snapf1cfg-config'},
- installed=[], fspath=self.tmp)
- self.assertEqual(
- ret, [makeop_tmpd(self.tmp, 'install', 'snapf1',
- path="snapf1.snap", config="snapf1cfg-config")])
-
- def test_package_ops_namespacing(self):
- cfgs = {
- 'config-example': {'k1': 'v1'},
- 'pkg1': {'p1': 'p2'},
- 'ubuntu-core': {'c1': 'c2'},
- 'notinstalled.smoser': {'s1': 's2'},
- }
- ret = get_package_ops(
- packages=['config-example.canonical'], configs=cfgs,
- installed=['config-example.smoser', 'pkg1.canonical',
- 'ubuntu-core'])
-
- expected_configs = [
- makeop('config', 'pkg1', config=cfgs['pkg1']),
- makeop('config', 'ubuntu-core', config=cfgs['ubuntu-core'])]
- expected_installs = [
- makeop('install', 'config-example.canonical',
- config=cfgs['config-example'])]
-
- installs = [i for i in ret if i['op'] == 'install']
- configs = [c for c in ret if c['op'] == 'config']
-
- self.assertEqual(installs, expected_installs)
- # configs are not ordered
- self.assertEqual(len(configs), len(expected_configs))
- self.assertTrue(all(found in expected_configs for found in configs))
-
- def test_render_op_localsnap(self):
- self.populate_tmp({"snapf1.snap": b"foo1"})
- op = makeop_tmpd(self.tmp, 'install', 'snapf1',
- path='snapf1.snap')
- render_snap_op(**op)
- self.assertEqual(
- self.snapcmds, [['install', op['path'], None]])
-
- def test_render_op_localsnap_localconfig(self):
- self.populate_tmp(
- {"snapf1.snap": b"foo1", 'snapf1.config': b'snapf1cfg'})
- op = makeop_tmpd(self.tmp, 'install', 'snapf1',
- path='snapf1.snap', cfgfile='snapf1.config')
- render_snap_op(**op)
- self.assertEqual(
- self.snapcmds, [['install', op['path'], 'snapf1cfg']])
-
- def test_render_op_snap(self):
- op = makeop('install', 'snapf1')
- render_snap_op(**op)
- self.assertEqual(
- self.snapcmds, [['install', 'snapf1', None]])
-
- def test_render_op_snap_config(self):
- mycfg = {'key1': 'value1'}
- name = "snapf1"
- op = makeop('install', name, config=mycfg)
- render_snap_op(**op)
- self.assertEqual(
- self.snapcmds, [['install', name, {'config': {name: mycfg}}]])
-
- def test_render_op_config_bytes(self):
- name = "snapf1"
- mycfg = b'myconfig'
- op = makeop('config', name, config=mycfg)
- render_snap_op(**op)
- self.assertEqual(
- self.snapcmds, [['config', 'snapf1', {'config': {name: mycfg}}]])
-
- def test_render_op_config_string(self):
- name = 'snapf1'
- mycfg = 'myconfig: foo\nhisconfig: bar\n'
- op = makeop('config', name, config=mycfg)
- render_snap_op(**op)
- self.assertEqual(
- self.snapcmds, [['config', 'snapf1', {'config': {name: mycfg}}]])
-
- def test_render_op_config_dict(self):
- # config entry for package can be a dict, not a string blob
- mycfg = {'foo': 'bar'}
- name = 'snapf1'
- op = makeop('config', name, config=mycfg)
- render_snap_op(**op)
- # snapcmds is a list of 3-entry lists. data_found will be the
- # blob of data in the file in 'snappy install --config=<file>'
- data_found = self.snapcmds[0][2]
- self.assertEqual(mycfg, data_found['config'][name])
-
- def test_render_op_config_list(self):
- # config entry for package can be a list, not a string blob
- mycfg = ['foo', 'bar', 'wark', {'f1': 'b1'}]
- name = "snapf1"
- op = makeop('config', name, config=mycfg)
- render_snap_op(**op)
- data_found = self.snapcmds[0][2]
- self.assertEqual(mycfg, data_found['config'][name])
-
- def test_render_op_config_int(self):
- # config entry for package can be a list, not a string blob
- mycfg = 1
- name = 'snapf1'
- op = makeop('config', name, config=mycfg)
- render_snap_op(**op)
- data_found = self.snapcmds[0][2]
- self.assertEqual(mycfg, data_found['config'][name])
-
- def test_render_long_configs_short(self):
- # install a namespaced package should have un-namespaced config
- mycfg = {'k1': 'k2'}
- name = 'snapf1'
- op = makeop('install', name + ".smoser", config=mycfg)
- render_snap_op(**op)
- data_found = self.snapcmds[0][2]
- self.assertEqual(mycfg, data_found['config'][name])
-
- def test_render_does_not_pad_cfgfile(self):
- # package_ops with cfgfile should not modify --file= content.
- mydata = "foo1: bar1\nk: [l1, l2, l3]\n"
- self.populate_tmp(
- {"snapf1.snap": b"foo1", "snapf1.config": mydata.encode()})
- ret = get_package_ops(
- packages=[], configs={}, installed=[], fspath=self.tmp)
- self.assertEqual(
- ret,
- [makeop_tmpd(self.tmp, 'install', 'snapf1', path="snapf1.snap",
- cfgfile="snapf1.config")])
-
- # now the op was ok, but test that render didn't mess it up.
- render_snap_op(**ret[0])
- data_found = self.snapcmds[0][2]
- # the data found gets loaded in the snapcmd interpretation
- # so this comparison is a bit lossy, but input to snappy config
- # is expected to be yaml loadable, so it should be OK.
- self.assertEqual(yaml.safe_load(mydata), data_found)
-
-
-class TestSnapConfig(FilesystemMockingTestCase):
-
- SYSTEM_USER_ASSERTION = textwrap.dedent("""
- type: system-user
- authority-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp
- brand-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp
- email: foo@bar.com
- password: $6$E5YiAuMIPAwX58jG$miomhVNui/vf7f/3ctB/f0RWSKFxG0YXzrJ9rtJ1ikvzt
- series:
- - 16
- since: 2016-09-10T16:34:00+03:00
- until: 2017-11-10T16:34:00+03:00
- username: baz
- sign-key-sha3-384: RuVvnp4n52GilycjfbbTCI3_L8Y6QlIE75wxMc0KzGV3AUQqVd9GuXoj
-
- AcLBXAQAAQoABgUCV/UU1wAKCRBKnlMoJQLkZVeLD/9/+hIeVywtzsDA3oxl+P+u9D13y9s6svP
- Jd6Wnf4FTw6sq1GjBE4ZA7lrwSaRCUJ9Vcsvf2q9OGPY7mOb2TBxaDe0PbUMjrSrqllSSQwhpNI
- zG+NxkkKuxsUmLzFa+k9m6cyojNbw5LFhQZBQCGlr3JYqC0tIREq/UsZxj+90TUC87lDJwkU8GF
- s4CR+rejZj4itIcDcVxCSnJH6hv6j2JrJskJmvObqTnoOlcab+JXdamXqbldSP3UIhWoyVjqzkj
- +to7mXgx+cCUA9+ngNCcfUG+1huGGTWXPCYkZ78HvErcRlIdeo4d3xwtz1cl/w3vYnq9og1XwsP
- Yfetr3boig2qs1Y+j/LpsfYBYncgWjeDfAB9ZZaqQz/oc8n87tIPZDJHrusTlBfop8CqcM4xsKS
- d+wnEY8e/F24mdSOYmS1vQCIDiRU3MKb6x138Ud6oHXFlRBbBJqMMctPqWDunWzb5QJ7YR0I39q
- BrnEqv5NE0G7w6HOJ1LSPG5Hae3P4T2ea+ATgkb03RPr3KnXnzXg4TtBbW1nytdlgoNc/BafE1H
- f3NThcq9gwX4xWZ2PAWnqVPYdDMyCtzW3Ck+o6sIzx+dh4gDLPHIi/6TPe/pUuMop9CBpWwez7V
- v1z+1+URx6Xlq3Jq18y5pZ6fY3IDJ6km2nQPMzcm4Q==""")
-
- ACCOUNT_ASSERTION = textwrap.dedent("""
- type: account-key
- authority-id: canonical
- revision: 2
- public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0
- account-id: canonical
- name: store
- since: 2016-04-01T00:00:00.0Z
- body-length: 717
- sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswH
-
- AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9j
- qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482
- vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJ
- UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuK
- Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQG
- o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl
- VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9
- 2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7an
- Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIc
- vUvV7RjVzv17ut0AEQEAAQ==
-
- AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsM
- WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/b
- nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiL
- 3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kL
- eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrY
- inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ1
- rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+
- rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWE
- aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQ
- 6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nO
- haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpF
- yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O9
- HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi
- skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PK
- CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjde
- ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OF
- qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqR
- IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3t
- oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k""")
-
- test_assertions = [ACCOUNT_ASSERTION, SYSTEM_USER_ASSERTION]
-
- def setUp(self):
- super(TestSnapConfig, self).setUp()
- self.subp = util.subp
- self.new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.new_root)
-
- def _get_cloud(self, distro, metadata=None):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- if metadata:
- myds.metadata.update(metadata)
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
- @mock.patch('cloudinit.util.write_file')
- @mock.patch('cloudinit.util.subp')
- def test_snap_config_add_assertions(self, msubp, mwrite):
- add_assertions(self.test_assertions)
-
- combined = "\n".join(self.test_assertions)
- mwrite.assert_any_call(ASSERTIONS_FILE, combined.encode('utf-8'))
- msubp.assert_called_with(['snap', 'ack', ASSERTIONS_FILE],
- capture=True)
-
- def test_snap_config_add_assertions_empty(self):
- self.assertRaises(ValueError, add_assertions, [])
-
- def test_add_assertions_nonlist(self):
- self.assertRaises(ValueError, add_assertions, {})
-
- @mock.patch('cloudinit.util.write_file')
- @mock.patch('cloudinit.util.subp')
- def test_snap_config_add_assertions_ack_fails(self, msubp, mwrite):
- msubp.side_effect = [util.ProcessExecutionError("Invalid assertion")]
- self.assertRaises(util.ProcessExecutionError, add_assertions,
- self.test_assertions)
-
- @mock.patch('cloudinit.config.cc_snap_config.add_assertions')
- @mock.patch('cloudinit.config.cc_snap_config.util')
- def test_snap_config_handle_no_config(self, mock_util, mock_add):
- cfg = {}
- cc = self._get_cloud('ubuntu')
- cc.distro = mock.MagicMock()
- cc.distro.name = 'ubuntu'
- mock_util.which.return_value = None
- snap_handle('snap_config', cfg, cc, LOG, None)
- mock_add.assert_not_called()
-
- def test_snap_config_add_snap_user_no_config(self):
- usercfg = add_snap_user(cfg=None)
- self.assertIsNone(usercfg)
-
- def test_snap_config_add_snap_user_not_dict(self):
- cfg = ['foobar']
- self.assertRaises(ValueError, add_snap_user, cfg)
-
- def test_snap_config_add_snap_user_no_email(self):
- cfg = {'assertions': [], 'known': True}
- usercfg = add_snap_user(cfg=cfg)
- self.assertIsNone(usercfg)
-
- @mock.patch('cloudinit.config.cc_snap_config.util')
- def test_snap_config_add_snap_user_email_only(self, mock_util):
- email = 'janet@planetjanet.org'
- cfg = {'email': email}
- mock_util.which.return_value = None
- mock_util.system_is_snappy.return_value = True
- mock_util.subp.side_effect = [
- ("false\n", ""), # snap managed
- ]
-
- usercfg = add_snap_user(cfg=cfg)
-
- self.assertEqual(usercfg, {'snapuser': email, 'known': False})
-
- @mock.patch('cloudinit.config.cc_snap_config.util')
- def test_snap_config_add_snap_user_email_known(self, mock_util):
- email = 'janet@planetjanet.org'
- known = True
- cfg = {'email': email, 'known': known}
- mock_util.which.return_value = None
- mock_util.system_is_snappy.return_value = True
- mock_util.subp.side_effect = [
- ("false\n", ""), # snap managed
- (self.SYSTEM_USER_ASSERTION, ""), # snap known system-user
- ]
-
- usercfg = add_snap_user(cfg=cfg)
-
- self.assertEqual(usercfg, {'snapuser': email, 'known': known})
-
- @mock.patch('cloudinit.config.cc_snap_config.add_assertions')
- @mock.patch('cloudinit.config.cc_snap_config.util')
- def test_snap_config_handle_system_not_snappy(self, mock_util, mock_add):
- cfg = {'snappy': {'assertions': self.test_assertions}}
- cc = self._get_cloud('ubuntu')
- cc.distro = mock.MagicMock()
- cc.distro.name = 'ubuntu'
- mock_util.which.return_value = None
- mock_util.system_is_snappy.return_value = False
-
- snap_handle('snap_config', cfg, cc, LOG, None)
-
- mock_add.assert_not_called()
-
- @mock.patch('cloudinit.config.cc_snap_config.add_assertions')
- @mock.patch('cloudinit.config.cc_snap_config.util')
- def test_snap_config_handle_snapuser(self, mock_util, mock_add):
- email = 'janet@planetjanet.org'
- cfg = {
- 'snappy': {
- 'assertions': self.test_assertions,
- 'email': email,
- }
- }
- cc = self._get_cloud('ubuntu')
- cc.distro = mock.MagicMock()
- cc.distro.name = 'ubuntu'
- mock_util.which.return_value = None
- mock_util.system_is_snappy.return_value = True
- mock_util.subp.side_effect = [
- ("false\n", ""), # snap managed
- ]
-
- snap_handle('snap_config', cfg, cc, LOG, None)
-
- mock_add.assert_called_with(self.test_assertions)
- usercfg = {'snapuser': email, 'known': False}
- cc.distro.create_user.assert_called_with(email, **usercfg)
-
- @mock.patch('cloudinit.config.cc_snap_config.add_assertions')
- @mock.patch('cloudinit.config.cc_snap_config.util')
- def test_snap_config_handle_snapuser_known(self, mock_util, mock_add):
- email = 'janet@planetjanet.org'
- cfg = {
- 'snappy': {
- 'assertions': self.test_assertions,
- 'email': email,
- 'known': True,
- }
- }
- cc = self._get_cloud('ubuntu')
- cc.distro = mock.MagicMock()
- cc.distro.name = 'ubuntu'
- mock_util.which.return_value = None
- mock_util.system_is_snappy.return_value = True
- mock_util.subp.side_effect = [
- ("false\n", ""), # snap managed
- (self.SYSTEM_USER_ASSERTION, ""), # snap known system-user
- ]
-
- snap_handle('snap_config', cfg, cc, LOG, None)
-
- mock_add.assert_called_with(self.test_assertions)
- usercfg = {'snapuser': email, 'known': True}
- cc.distro.create_user.assert_called_with(email, **usercfg)
-
- @mock.patch('cloudinit.config.cc_snap_config.add_assertions')
- @mock.patch('cloudinit.config.cc_snap_config.util')
- def test_snap_config_handle_snapuser_known_managed(self, mock_util,
- mock_add):
- email = 'janet@planetjanet.org'
- cfg = {
- 'snappy': {
- 'assertions': self.test_assertions,
- 'email': email,
- 'known': True,
- }
- }
- cc = self._get_cloud('ubuntu')
- cc.distro = mock.MagicMock()
- cc.distro.name = 'ubuntu'
- mock_util.which.return_value = None
- mock_util.system_is_snappy.return_value = True
- mock_util.subp.side_effect = [
- ("true\n", ""), # snap managed
- ]
-
- snap_handle('snap_config', cfg, cc, LOG, None)
-
- mock_add.assert_called_with(self.test_assertions)
- cc.distro.create_user.assert_not_called()
-
- @mock.patch('cloudinit.config.cc_snap_config.add_assertions')
- @mock.patch('cloudinit.config.cc_snap_config.util')
- def test_snap_config_handle_snapuser_known_no_assertion(self, mock_util,
- mock_add):
- email = 'janet@planetjanet.org'
- cfg = {
- 'snappy': {
- 'assertions': [self.ACCOUNT_ASSERTION],
- 'email': email,
- 'known': True,
- }
- }
- cc = self._get_cloud('ubuntu')
- cc.distro = mock.MagicMock()
- cc.distro.name = 'ubuntu'
- mock_util.which.return_value = None
- mock_util.system_is_snappy.return_value = True
- mock_util.subp.side_effect = [
- ("true\n", ""), # snap managed
- ("", ""), # snap known system-user
- ]
-
- snap_handle('snap_config', cfg, cc, LOG, None)
-
- mock_add.assert_called_with([self.ACCOUNT_ASSERTION])
- cc.distro.create_user.assert_not_called()
-
-
-def makeop_tmpd(tmpd, op, name, config=None, path=None, cfgfile=None):
- if cfgfile:
- cfgfile = os.path.sep.join([tmpd, cfgfile])
- if path:
- path = os.path.sep.join([tmpd, path])
- return(makeop(op=op, name=name, config=config, path=path, cfgfile=cfgfile))
-
-
-def apply_patches(patches):
- ret = []
- for (ref, name, replace) in patches:
- if replace is None:
- continue
- orig = getattr(ref, name)
- setattr(ref, name, replace)
- ret.append((ref, name, orig))
- return ret
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py
new file mode 100644
index 00000000..48296c30
--- /dev/null
+++ b/tests/unittests/test_net_freebsd.py
@@ -0,0 +1,19 @@
+from cloudinit import net
+
+from cloudinit.tests.helpers import (CiTestCase, mock, readResource)
+
+SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output")
+
+
+class TestInterfacesByMac(CiTestCase):
+
+ @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.util.is_FreeBSD')
+ def test_get_interfaces_by_mac(self, mock_is_FreeBSD, mock_subp):
+ mock_is_FreeBSD.return_value = True
+ mock_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, 0)
+ a = net.get_interfaces_by_mac()
+ assert a == {'52:54:00:50:b7:0d': 'vtnet0',
+ '80:00:73:63:5c:48': 're0.33',
+ '02:14:39:0e:25:00': 'bridge0',
+ '02:ff:60:8c:f3:72': 'vnet0:11'}
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index 73ae897f..b227c20b 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -1,11 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
from mock import patch
+from collections import namedtuple
from cloudinit import ssh_util
from cloudinit.tests import helpers as test_helpers
from cloudinit import util
+# https://stackoverflow.com/questions/11351032/
+FakePwEnt = namedtuple(
+ 'FakePwEnt',
+ ['pw_dir', 'pw_gecos', 'pw_name', 'pw_passwd', 'pw_shell', 'pw_uid'])
+FakePwEnt.__new__.__defaults__ = tuple(
+ "UNSET_%s" % n for n in FakePwEnt._fields)
+
VALID_CONTENT = {
'dsa': (
@@ -326,4 +334,79 @@ class TestUpdateSshConfig(test_helpers.CiTestCase):
m_write_file.assert_not_called()
+class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase):
+ def test_user(self):
+ self.assertEqual(
+ ["/opt/bobby/keys"],
+ ssh_util.render_authorizedkeysfile_paths(
+ "/opt/%u/keys", "/home/bobby", "bobby"))
+
+ def test_multiple(self):
+ self.assertEqual(
+ ["/keys/path1", "/keys/path2"],
+ ssh_util.render_authorizedkeysfile_paths(
+ "/keys/path1 /keys/path2", "/home/bobby", "bobby"))
+
+ def test_relative(self):
+ self.assertEqual(
+ ["/home/bobby/.secret/keys"],
+ ssh_util.render_authorizedkeysfile_paths(
+ ".secret/keys", "/home/bobby", "bobby"))
+
+ def test_home(self):
+ self.assertEqual(
+ ["/homedirs/bobby/.keys"],
+ ssh_util.render_authorizedkeysfile_paths(
+ "%h/.keys", "/homedirs/bobby", "bobby"))
+
+
+class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ def test_multiple_authorizedkeys_file_order1(self, m_getpwnam):
+ fpw = FakePwEnt(pw_name='bobby', pw_dir='/home2/bobby')
+ m_getpwnam.return_value = fpw
+ authorized_keys = self.tmp_path('authorized_keys')
+ util.write_file(authorized_keys, VALID_CONTENT['rsa'])
+
+ user_keys = self.tmp_path('user_keys')
+ util.write_file(user_keys, VALID_CONTENT['dsa'])
+
+ sshd_config = self.tmp_path('sshd_config')
+ util.write_file(
+ sshd_config,
+ "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys))
+
+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
+ fpw.pw_name, sshd_config)
+ content = ssh_util.update_authorized_keys(
+ auth_key_entries, [])
+
+ self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn)
+ self.assertTrue(VALID_CONTENT['rsa'] in content)
+ self.assertTrue(VALID_CONTENT['dsa'] in content)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ def test_multiple_authorizedkeys_file_order2(self, m_getpwnam):
+ fpw = FakePwEnt(pw_name='suzie', pw_dir='/home/suzie')
+ m_getpwnam.return_value = fpw
+ authorized_keys = self.tmp_path('authorized_keys')
+ util.write_file(authorized_keys, VALID_CONTENT['rsa'])
+
+ user_keys = self.tmp_path('user_keys')
+ util.write_file(user_keys, VALID_CONTENT['dsa'])
+
+ sshd_config = self.tmp_path('sshd_config')
+ util.write_file(
+ sshd_config,
+ "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys))
+
+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
+ fpw.pw_name, sshd_config)
+ content = ssh_util.update_authorized_keys(auth_key_entries, [])
+
+ self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn)
+ self.assertTrue(VALID_CONTENT['rsa'] in content)
+ self.assertTrue(VALID_CONTENT['dsa'] in content)
+
# vi: ts=4 expandtab
diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index f596fc7b..6b20d360 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -1,14 +1,27 @@
{
+ "adobrawy": "ad-m",
+ "afranceschini": "andreaf74",
+ "ahosmanmsft": "AOhassan",
+ "andreipoltavchenko": "pa-yourserveradmin-com",
+ "askon": "ask0n",
+ "bitfehler": "bitfehler",
"chad.smith": "blackboxsw",
"d-info-e": "do3meli",
- "eric-lafontaine1": "elafontaine"
+ "daniel-thewatkins": "OddBloke",
+ "eric-lafontaine1": "elafontaine",
+ "fredlefebvre": "fred-lefebvre",
+ "goneri": "goneri",
"harald-jensas": "hjensas",
"i.galic": "igalic",
"larsks": "larsks",
"legovini": "paride",
+ "louis": "karibou",
+ "madhuri-rai07": "madhuri-rai07",
+ "otubo": "otubo",
"pengpengs": "PengpengSun",
"powersj": "powersj",
"raharper": "raharper",
+ "rjschwei": "rjschwei",
"tribaal": "chrisglass",
"trstringer": "trstringer",
"xiaofengw": "xiaofengw-vmware"
diff --git a/tools/ds-identify b/tools/ds-identify
index 20a99ee9..c93d4a77 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -179,13 +179,39 @@ debug() {
echo "$@" 1>&3
}
+dmi_decode() {
+ local sys_field="$1" dmi_field="" val=""
+ command -v dmidecode >/dev/null 2>&1 || {
+ warn "No dmidecode program. Cannot read $sys_field."
+ return 1
+ }
+ case "$1" in
+ sys_vendor) dmi_field="system-manufacturer";;
+ product_name) dmi_field="system-product-name";;
+ product_uuid) dmi_field="system-uuid";;
+ product_serial) dmi_field="system-serial-number";;
+ chassis_asset_tag) dmi_field="chassis-asset-tag";;
+ *) error "Unknown field $sys_field. Cannot call dmidecode."
+ return 1;;
+ esac
+ val=$(dmidecode --quiet "--string=$dmi_field" 2>/dev/null) || return 1
+ _RET="$val"
+}
+
get_dmi_field() {
local path="${PATH_SYS_CLASS_DMI_ID}/$1"
- if [ ! -f "$path" ] || [ ! -r "$path" ]; then
- _RET="$UNAVAILABLE"
+ _RET="$UNAVAILABLE"
+ if [ -d "${PATH_SYS_CLASS_DMI_ID}" ]; then
+ if [ -f "$path" ] && [ -r "$path" ]; then
+ read _RET < "${path}" || _RET="$ERROR"
+ return
+ fi
+ # if `/sys/class/dmi/id` exists, but not the object we're looking for,
+ # do *not* fallback to dmidecode!
return
fi
- read _RET < "${path}" || _RET="$ERROR"
+ dmi_decode "$1" || _RET="$ERROR"
+ return
}
block_dev_with_label() {
@@ -267,6 +293,31 @@ detect_virt() {
if [ $r -eq 0 ] || { [ $r -ne 0 ] && [ "$out" = "none" ]; }; then
virt="$out"
fi
+ elif [ "$DI_UNAME_KERNEL_NAME" = "FreeBSD" ]; then
+ # Map FreeBSD's vm_guest names to those of systemd-detect-virt
+ # where they don't match up. See
+ # https://github.com/freebsd/freebsd/blob/master/sys/kern/subr_param.c#L144-L160
+ # https://www.freedesktop.org/software/systemd/man/systemd-detect-virt.html
+ #
+ # systemd | kern.vm_guest
+ # ---------------------+---------------
+ # none | none
+ # kvm | kvm
+ # vmware | vmware
+ # microsoft | hv
+ # oracle | vbox
+ # xen | xen
+ # parallels | parallels
+ # bhyve | bhyve
+ # vm-other | generic
+ out=$(sysctl -qn kern.vm_guest 2>/dev/null) && {
+ case "$out" in
+ hv) virt="microsoft" ;;
+ vbox) virt="oracle" ;;
+ generic) virt="vm-other";;
+ *) virt="$out"
+ esac
+ }
fi
_RET="$virt"
}
diff --git a/tools/migrate-lp-user-to-github b/tools/migrate-lp-user-to-github
index cbb34695..f1247cb3 100755
--- a/tools/migrate-lp-user-to-github
+++ b/tools/migrate-lp-user-to-github
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
"""Link your Launchpad user to github, proposing branches to LP and Github"""
from argparse import ArgumentParser
@@ -83,9 +83,6 @@ def get_parser():
parser.add_argument(
'-v', '--verbose', required=False, default=False, action='store_true',
help=('Print all actions.'))
- parser.add_argument(
- '--push-remote', required=False, dest='pushremote',
- help=('QA-only provide remote name into which you want to push'))
return parser
@@ -122,7 +119,12 @@ def add_lp_and_github_remotes(lp_user, gh_user):
" LP repo".format(lp_user))
lp_remote_name = 'launchpad-{}'.format(lp_user)
subp(['git', 'remote', 'add', lp_remote_name, lp_remote])
- subp(['git', 'fetch', lp_remote_name])
+ try:
+ subp(['git', 'fetch', lp_remote_name])
+ except Exception:
+ log("launchpad: Pushing to ensure LP repo exists")
+ subp(['git', 'push', lp_remote_name, 'master:master'])
+ subp(['git', 'fetch', lp_remote_name])
if not gh_remote_name:
log("github: Creating git remote github-{} to point at your"
" GH repo".format(gh_user))
@@ -184,7 +186,7 @@ def main():
cleanup_repo_dir = False
cwd = os.getcwd()
os.chdir(repo_dir)
- log("Sycing master branch with upstream")
+ log("Syncing master branch with upstream")
subp(['git', 'checkout', 'master'])
subp(['git', 'pull'])
try:
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
index a441f4ff..3d5fa725 100755
--- a/tools/render-cloudcfg
+++ b/tools/render-cloudcfg
@@ -4,8 +4,8 @@ import argparse
import os
import sys
-VARIANTS = ["arch", "centos", "debian", "fedora", "freebsd", "rhel", "suse",
- "ubuntu", "unknown"]
+VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd", "rhel",
+ "suse", "ubuntu", "unknown"]
if "avoid-pep8-E402-import-not-top-of-file":
_tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
diff --git a/tox.ini b/tox.ini
index 042346bb..8612f034 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,11 +1,13 @@
[tox]
-envlist = py27, py3, xenial, pycodestyle, pyflakes, pylint
+envlist = py3, xenial, pycodestyle, pyflakes, pylint
recreate = True
[testenv]
commands = python -m nose {posargs:tests/unittests cloudinit}
setenv =
LC_ALL = en_US.utf-8
+passenv=
+ NOSE_VERBOSE
[testenv:pycodestyle]
basepython = python3
@@ -55,9 +57,7 @@ exclude = .venv,.tox,dist,doc,*egg,.git,build,tools
[testenv:doc]
basepython = python3
deps =
- doc8
- sphinx
- sphinx_rtd_theme
+ -r{toxinidir}/doc-requirements.txt
commands =
{envpython} -m sphinx {posargs:doc/rtd doc/rtd_html}
doc8 doc/rtd