author    Pedro Alvarez <pedro.alvarez@codethink.co.uk>  2013-10-10 11:03:34 +0000
committer Pedro Alvarez <pedro.alvarez@codethink.co.uk>  2013-10-10 11:03:34 +0000
commit    eeb4923b3528ebf4d93d8297b2d489738c3ea7c3 (patch)
tree      441977795acb25c905c4369ea6401db3b3310f3e
parent    053df0396e93b71f3fb4c6f4b4a6b2710a26d517 (diff)
parent    dd5ac6445d51f7fd09d6ca11c627c5f7de179968 (diff)
download  cloud-init-eeb4923b3528ebf4d93d8297b2d489738c3ea7c3.tar.gz
Merge branch 'baserock/morph-rebase-7-3v2' into baserock/morph
Conflicts:
	config/cloud.cfg
	tools/read-dependencies
	tools/read-version
-rw-r--r--  ChangeLog                                                 |  12
-rw-r--r--  cloudinit/config/cc_disk_setup.py                         | 790
-rw-r--r--  cloudinit/config/cc_final_message.py                      |   2
-rw-r--r--  cloudinit/config/cc_growpart.py                           |   7
-rw-r--r--  cloudinit/config/cc_mounts.py                             | 135
-rw-r--r--  cloudinit/config/cc_seed_random.py                        |  61
-rw-r--r--  cloudinit/config/cc_ssh_authkey_fingerprints.py           |   2
-rw-r--r--  cloudinit/distros/baserock.py                             | 127
-rw-r--r--  cloudinit/helpers.py                                      |  18
-rw-r--r--  cloudinit/settings.py                                     |   1
-rw-r--r--  cloudinit/sources/DataSourceAzure.py                      |  32
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py                |  44
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py                 | 442
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py                    |  76
-rw-r--r--  cloudinit/sources/__init__.py                             |  18
-rw-r--r--  cloudinit/stages.py                                       |  12
-rw-r--r--  cloudinit/util.py                                         |  48
-rw-r--r--  config/cloud.cfg                                          |  82
-rw-r--r--  config/cloud.cfg.d/baserock.cfg                           |  19
-rw-r--r--  doc/examples/cloud-config-disk-setup.txt                  | 251
-rw-r--r--  doc/examples/cloud-config-growpart.txt                    |   9
-rw-r--r--  doc/examples/cloud-config-user-groups.txt                 |   4
-rw-r--r--  doc/rtd/topics/datasources.rst                            |   6
-rw-r--r--  doc/sources/opennebula/README.rst                         | 142
-rw-r--r--  doc/sources/smartos/README.rst                            |  33
-rwxr-xr-x  packages/bddeb                                            |   2
-rw-r--r--  systemd/cloud-config.service                              |   2
-rw-r--r--  systemd/cloud-final.service                               |   2
-rw-r--r--  systemd/cloud-init-local.service                          |   2
-rw-r--r--  systemd/cloud-init.service                                |   2
-rw-r--r--  tests/unittests/test_datasource/test_azure.py             |  60
-rw-r--r--  tests/unittests/test_datasource/test_opennebula.py        | 267
-rw-r--r--  tests/unittests/test_datasource/test_smartos.py           |  35
-rw-r--r--  tests/unittests/test_handler/test_handler_seed_random.py  | 150
-rw-r--r--  tests/unittests/test_userdata.py                          |   2
-rw-r--r--  tests/unittests/test_util.py                              |   2
-rwxr-xr-x  tools/read-dependencies                                   |  15
-rwxr-xr-x  tools/read-version                                        |  17
-rw-r--r--  upstart/cloud-init-nonet.conf                             |   2
39 files changed, 2729 insertions, 204 deletions
diff --git a/ChangeLog b/ChangeLog
index 4b2770a4..20e3fcc3 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -3,6 +3,7 @@
- small fix for OVF datasource for iso transport on non-iso9660 filesystem
- determine if upstart version is suitable for
'initctl reload-configuration' (LP: #1124384). If so, then invoke it.
+ supports setting up instance-store disk with partition table and filesystem.
- add Azure datasource.
- add support for SuSE / SLES [Juerg Haefliger]
- add a trailing carriage return to chpasswd input, which reportedly
@@ -15,6 +16,17 @@
which also reads from uptime. uptime is useful as clock may change during
boot due to ntp.
- prefer growpart resizer to 'parted resizepart' (LP: #1212492)
+ - support random data seed from config drive or azure, and a module
+ 'seed_random' to read that and write it to /dev/urandom.
+ - add OpenNebula Datasource [Vlastimil Holer]
+ - add 'cc_disk_setup' config module for partitioning disks and creating
+ filesystems. Useful if attached disks are not formatted (LP: #1218506)
+ - Fix usage of libselinux-python when selinux is disabled. [Garrett Holmstrom]
+ - multi_log: only write to /dev/console if it exists [Garrett Holmstrom]
+ - config/cloud.cfg: add 'sudo' to list groups for the default user
+ (LP: #1228228)
+ - documentation fix for use of 'mkpasswd' [Eric Nordlund]
+ - respect /etc/growroot-disabled file (LP: #1234331)
0.7.2:
- add a debian watch file
- add 'sudo' entry to ubuntu's default user (LP: #1080717)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
new file mode 100644
index 00000000..0b970e4e
--- /dev/null
+++ b/cloudinit/config/cc_disk_setup.py
@@ -0,0 +1,790 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Ben Howard <ben.howard@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
+import logging
+import os
+import shlex
+
+frequency = PER_INSTANCE
+
+# Define the commands to use
+UDEVADM_CMD = util.which('udevadm')
+SFDISK_CMD = util.which("sfdisk")
+LSBLK_CMD = util.which("lsblk")
+BLKID_CMD = util.which("blkid")
+BLKDEV_CMD = util.which("blockdev")
+WIPEFS_CMD = util.which("wipefs")
+
+LOG = logging.getLogger(__name__)
+
+
+def handle(_name, cfg, cloud, log, _args):
+ """
+    See doc/examples/cloud-config-disk-setup.txt for documentation on the
+ format.
+ """
+ disk_setup = cfg.get("disk_setup")
+ if isinstance(disk_setup, dict):
+ update_disk_setup_devices(disk_setup, cloud.device_name_to_device)
+ log.debug("Partitioning disks: %s", str(disk_setup))
+ for disk, definition in disk_setup.items():
+ if not isinstance(definition, dict):
+ log.warn("Invalid disk definition for %s" % disk)
+ continue
+
+ try:
+ log.debug("Creating new partition table/disk")
+ util.log_time(logfunc=LOG.debug,
+ msg="Creating partition on %s" % disk,
+ func=mkpart, args=(disk, definition))
+ except Exception as e:
+ util.logexc(LOG, "Failed partitioning operation\n%s" % e)
+
+ fs_setup = cfg.get("fs_setup")
+ if isinstance(fs_setup, list):
+ log.debug("setting up filesystems: %s", str(fs_setup))
+ update_fs_setup_devices(fs_setup, cloud.device_name_to_device)
+ for definition in fs_setup:
+ if not isinstance(definition, dict):
+ log.warn("Invalid file system definition: %s" % definition)
+ continue
+
+ try:
+ log.debug("Creating new filesystem.")
+ device = definition.get('device')
+ util.log_time(logfunc=LOG.debug,
+ msg="Creating fs for %s" % device,
+ func=mkfs, args=(definition,))
+ except Exception as e:
+ util.logexc(LOG, "Failed during filesystem operation\n%s" % e)
+
+
+def update_disk_setup_devices(disk_setup, tformer):
+    # update the 'disk_setup' dictionary anywhere a device name may occur,
+    # replacing it with the response from 'tformer'
+ for origname in disk_setup.keys():
+ transformed = tformer(origname)
+ if transformed is None or transformed == origname:
+ continue
+ if transformed in disk_setup:
+ LOG.info("Replacing %s in disk_setup for translation of %s",
+ origname, transformed)
+ del disk_setup[transformed]
+
+ disk_setup[transformed] = disk_setup[origname]
+ disk_setup[transformed]['_origname'] = origname
+ del disk_setup[origname]
+ LOG.debug("updated disk_setup device entry '%s' to '%s'",
+ origname, transformed)
+
+
+def update_fs_setup_devices(disk_setup, tformer):
+    # update the 'fs_setup' dictionary anywhere a device name may occur,
+    # replacing it with the response from 'tformer'
+ for definition in disk_setup:
+ if not isinstance(definition, dict):
+ LOG.warn("entry in disk_setup not a dict: %s", definition)
+ continue
+
+ origname = definition.get('device')
+
+ if origname is None:
+ continue
+
+ (dev, part) = util.expand_dotted_devname(origname)
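+        # splits a dotted name, e.g. 'ephemeral0.1' -> ('ephemeral0', '1')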
+
+ tformed = tformer(dev)
+ if tformed is not None:
+ dev = tformed
+ LOG.debug("%s is mapped to disk=%s part=%s",
+ origname, tformed, part)
+ definition['_origname'] = origname
+ definition['device'] = tformed
+
+ if part and 'partition' in definition:
+ definition['_partition'] = definition['partition']
+ definition['partition'] = part
+
+
+def value_splitter(values, start=None):
+ """
+    Yield the key/value pairs from output passed in as a string,
+    e.g.: FOO='BAR' HOME='127.0.0.1'
+ """
+ _values = shlex.split(values)
+ if start:
+ _values = _values[start:]
+
+ for key, value in [x.split('=') for x in _values]:
+ yield key, value
+
+
+def enumerate_disk(device, nodeps=False):
+ """
+ Enumerate the elements of a child device.
+
+ Parameters:
+ device: the kernel device name
+ nodeps <BOOL>: don't enumerate children devices
+
+    Yield a dict describing each element of the disk:
+        type: the entry type, i.e. disk or part
+ fstype: the filesystem type, if it exists
+ label: file system label, if it exists
+ name: the device name, i.e. sda
+ """
+
+ lsblk_cmd = [LSBLK_CMD, '--pairs', '--out', 'NAME,TYPE,FSTYPE,LABEL',
+ device]
+
+ if nodeps:
+ lsblk_cmd.append('--nodeps')
+
+ info = None
+ try:
+ info, _err = util.subp(lsblk_cmd)
+ except Exception as e:
+ raise Exception("Failed during disk check for %s\n%s" % (device, e))
+
+ parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0]
+
+ for part in parts:
+ d = {'name': None,
+ 'type': None,
+ 'fstype': None,
+ 'label': None,
+ }
+
+ for key, value in value_splitter(part):
+ d[key.lower()] = value
+
+ yield d
+
+
+def device_type(device):
+ """
+ Return the device type of the device by calling lsblk.
+ """
+
+ for d in enumerate_disk(device, nodeps=True):
+ if "type" in d:
+ return d["type"].lower()
+ return None
+
+
+def is_device_valid(name, partition=False):
+ """
+ Check if the device is a valid device.
+ """
+ d_type = ""
+ try:
+ d_type = device_type(name)
+ except:
+ LOG.warn("Query against device %s failed" % name)
+ return False
+
+ if partition and d_type == 'part':
+ return True
+ elif not partition and d_type == 'disk':
+ return True
+ return False
+
+
+def check_fs(device):
+ """
+ Check if the device has a filesystem on it
+
+ Output of blkid is generally something like:
+ /dev/sda: LABEL="Backup500G" UUID="..." TYPE="ext4"
+
+    Return values are label, type, uuid
+ """
+ out, label, fs_type, uuid = None, None, None, None
+
+ blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device]
+ try:
+ out, _err = util.subp(blkid_cmd, rcs=[0, 2])
+ except Exception as e:
+ raise Exception("Failed during disk check for %s\n%s" % (device, e))
+
+ if out:
+ if len(out.splitlines()) == 1:
+ for key, value in value_splitter(out, start=1):
+ if key.lower() == 'label':
+ label = value
+ elif key.lower() == 'type':
+ fs_type = value
+ elif key.lower() == 'uuid':
+ uuid = value
+
+ return label, fs_type, uuid
+
+
+def is_filesystem(device):
+ """
+ Returns true if the device has a file system.
+ """
+ _, fs_type, _ = check_fs(device)
+ return fs_type
+
+
+def find_device_node(device, fs_type=None, label=None, valid_targets=None,
+ label_match=True, replace_fs=None):
+ """
+    Find a device that either matches the spec or is the first available.
+
+    The return value is (<device>, <bool>) where the device is the
+ device to use and the bool is whether the device matches the
+ fs_type and label.
+
+ Note: This works with GPT partition tables!
+ """
+ # label of None is same as no label
+ if label is None:
+ label = ""
+
+ if not valid_targets:
+ valid_targets = ['disk', 'part']
+
+ raw_device_used = False
+ for d in enumerate_disk(device):
+
+ if d['fstype'] == replace_fs and label_match is False:
+ # We found a device where we want to replace the FS
+ return ('/dev/%s' % d['name'], False)
+
+ if (d['fstype'] == fs_type and
+ ((label_match and d['label'] == label) or not label_match)):
+ # If we find a matching device, we return that
+ return ('/dev/%s' % d['name'], True)
+
+ if d['type'] in valid_targets:
+
+ if d['type'] != 'disk' or d['fstype']:
+ raw_device_used = True
+
+ if d['type'] == 'disk':
+                # Skip the raw disk, it's the default
+ pass
+
+ elif not d['fstype']:
+ return ('/dev/%s' % d['name'], False)
+
+ if not raw_device_used:
+ return (device, False)
+
+ LOG.warn("Failed to find device during available device search.")
+ return (None, False)
+
+
+def is_disk_used(device):
+ """
+    Check if the device is currently used. Returns true if the device
+    has either a filesystem or a partition entry.
+ """
+
+    # If the child count is higher than 1, then there are child nodes
+    # such as partition or device mapper nodes
+    use_count = [x for x in enumerate_disk(device)]
+    if len(use_count) > 1:
+ return True
+
+    # If we see a file system, then it's used
+ _, check_fstype, _ = check_fs(device)
+ if check_fstype:
+ return True
+
+ return False
+
+
+def get_hdd_size(device):
+ """
+ Returns the hard disk size.
+ This works with any disk type, including GPT.
+ """
+
+ size_cmd = [SFDISK_CMD, '--show-size', device]
+ size = None
+ try:
+ size, _err = util.subp(size_cmd)
+ except Exception as e:
+ raise Exception("Failed to get %s size\n%s" % (device, e))
+
+ return int(size.strip())
+
+
+def get_dyn_func(*args):
+ """
+ Call the appropriate function.
+
+ The first value is the template for function name
+ The second value is the template replacement
+ The remain values are passed to the function
+
+ For example: get_dyn_func("foo_%s", 'bar', 1, 2, 3,)
+ would call "foo_bar" with args of 1, 2, 3
+ """
+ if len(args) < 2:
+ raise Exception("Unable to determine dynamic funcation name")
+
+ func_name = (args[0] % args[1])
+ func_args = args[2:]
+
+ try:
+ if func_args:
+ return globals()[func_name](*func_args)
+ else:
+ return globals()[func_name]
+
+ except KeyError:
+ raise Exception("No such function %s to call!" % func_name)
+
+
+def check_partition_mbr_layout(device, layout):
+ """
+ Returns true if the partition layout matches the one on the disk
+
+ Layout should be a list of values. At this time, this only
+    verifies that the number of partitions and their type labels are correct.
+ """
+
+ read_parttbl(device)
+ prt_cmd = [SFDISK_CMD, "-l", device]
+ try:
+ out, _err = util.subp(prt_cmd, data="%s\n" % layout)
+ except Exception as e:
+ raise Exception("Error running partition command on %s\n%s" % (
+ device, e))
+
+ found_layout = []
+ for line in out.splitlines():
+ _line = line.split()
+ if len(_line) == 0:
+ continue
+
+ if device in _line[0]:
+ # We don't understand extended partitions yet
+ if _line[-1].lower() in ['extended', 'empty']:
+ continue
+
+ # Find the partition types
+ type_label = None
+ for x in sorted(range(1, len(_line)), reverse=True):
+ if _line[x].isdigit() and _line[x] != '/':
+ type_label = _line[x]
+ break
+
+ found_layout.append(type_label)
+
+ if isinstance(layout, bool):
+        # if we are using auto partitioning (layout is "True"), be happy
+        # as long as a single partition exists.
+ if layout and len(found_layout) >= 1:
+ return True
+ return False
+
+ else:
+ if len(found_layout) != len(layout):
+ return False
+ else:
+ # This just makes sure that the number of requested
+ # partitions and the type labels are right
+            for x in range(1, len(layout) + 1):
+                if isinstance(layout[x - 1], (tuple, list)):
+                    _, part_type = layout[x - 1]
+                    if int(found_layout[x - 1]) != int(part_type):
+                        return False
+ return True
+
+ return False
+
+
+def check_partition_layout(table_type, device, layout):
+ """
+    See if the partition layout matches.
+
+    This is a future-proofing function. In order
+ to add support for other disk layout schemes, add a
+ function called check_partition_%s_layout
+ """
+ return get_dyn_func("check_partition_%s_layout", table_type, device,
+ layout)
+
+
+def get_partition_mbr_layout(size, layout):
+ """
+ Calculate the layout of the partition table. Partition sizes
+ are defined as percentage values or a tuple of percentage and
+ partition type.
+
+ For example:
+        [ 33, [66, 82] ]
+
+ Defines the first partition to be a size of 1/3 the disk,
+ while the remaining 2/3's will be of type Linux Swap.
+ """
+
+ if not isinstance(layout, list) and isinstance(layout, bool):
+ # Create a single partition
+ return "0,"
+
+ if ((len(layout) == 0 and isinstance(layout, list)) or
+ not isinstance(layout, list)):
+ raise Exception("Partition layout is invalid")
+
+ last_part_num = len(layout)
+ if last_part_num > 4:
+ raise Exception("Only simply partitioning is allowed.")
+
+ part_definition = []
+ part_num = 0
+ for part in layout:
+ part_type = 83 # Default to Linux
+ percent = part
+ part_num += 1
+
+ if isinstance(part, list):
+ if len(part) != 2:
+ raise Exception("Partition was incorrectly defined: %s" % part)
+ percent, part_type = part
+
+ part_size = int((float(size) * (float(percent) / 100)) / 1024)
+
+ if part_num == last_part_num:
+ part_definition.append(",,%s" % part_type)
+ else:
+ part_definition.append(",%s,%s" % (part_size, part_type))
+
+ sfdisk_definition = "\n".join(part_definition)
+ if len(part_definition) > 4:
+ raise Exception("Calculated partition definition is too big\n%s" %
+ sfdisk_definition)
+
+ return sfdisk_definition
+
+
+def purge_disk_ptable(device):
+ # wipe the first and last megabyte of a disk (or file)
+ # gpt stores partition table both at front and at end.
+ null = '\0' # pylint: disable=W1401
+ start_len = 1024 * 1024
+ end_len = 1024 * 1024
+ with open(device, "rb+") as fp:
+ fp.write(null * (start_len))
+ fp.seek(-end_len, os.SEEK_END)
+ fp.write(null * end_len)
+ fp.flush()
+
+ read_parttbl(device)
+
+
+def purge_disk(device):
+ """
+    Remove partition table entries
+ """
+
+ # wipe any file systems first
+ for d in enumerate_disk(device):
+ if d['type'] not in ["disk", "crypt"]:
+ wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']]
+ try:
+ LOG.info("Purging filesystem on /dev/%s" % d['name'])
+ util.subp(wipefs_cmd)
+ except Exception:
+ raise Exception("Failed FS purge of /dev/%s" % d['name'])
+
+ purge_disk_ptable(device)
+
+
+def get_partition_layout(table_type, size, layout):
+ """
+ Call the appropriate function for creating the table
+ definition. Returns the table definition
+
+    This is a future-proofing function. To add support for
+ other layouts, simply add a "get_partition_%s_layout"
+ function.
+ """
+ return get_dyn_func("get_partition_%s_layout", table_type, size, layout)
+
+
+def read_parttbl(device):
+ """
+    Re-read the partition table with 'blockdev --rereadpt', wrapped in
+    'udevadm settle' calls so udev does not race the re-read.
+ """
+ blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
+ udev_cmd = [UDEVADM_CMD, 'settle']
+ try:
+ util.subp(udev_cmd)
+ util.subp(blkdev_cmd)
+ util.subp(udev_cmd)
+ except Exception as e:
+ util.logexc(LOG, "Failed reading the partition table %s" % e)
+
+
+def exec_mkpart_mbr(device, layout):
+ """
+    MBR partitioning is broken out into its own function to allow for
+    future partition table types, e.g. GPT.
+ """
+ # Create the partitions
+ prt_cmd = [SFDISK_CMD, "--Linux", "-uM", device]
+ try:
+ util.subp(prt_cmd, data="%s\n" % layout)
+ except Exception as e:
+ raise Exception("Failed to partition device %s\n%s" % (device, e))
+
+ read_parttbl(device)
+
+
+def exec_mkpart(table_type, device, layout):
+ """
+ Fetches the function for creating the table type.
+    This allows us to dynamically find which function to call.
+
+    Parameters:
+ table_type: type of partition table to use
+ device: the device to work on
+ layout: layout definition specific to partition table
+ """
+ return get_dyn_func("exec_mkpart_%s", table_type, device, layout)
+
+
+def mkpart(device, definition):
+ """
+ Creates the partition table.
+
+ Parameters:
+ definition: dictionary describing how to create the partition.
+
+ The following are supported values in the dict:
+        overwrite: Should the partition table be created regardless
+                   of any pre-existing data?
+ layout: the layout of the partition table
+ table_type: Which partition table to use, defaults to MBR
+ device: the device to work on.
+ """
+
+ LOG.debug("Checking values for %s definition" % device)
+ overwrite = definition.get('overwrite', False)
+ layout = definition.get('layout', False)
+ table_type = definition.get('table_type', 'mbr')
+
+ # Check if the default device is a partition or not
+ LOG.debug("Checking against default devices")
+
+ if (isinstance(layout, bool) and not layout) or not layout:
+ LOG.debug("Device is not to be partitioned, skipping")
+ return # Device is not to be partitioned
+
+ # This prevents you from overwriting the device
+ LOG.debug("Checking if device %s is a valid device", device)
+ if not is_device_valid(device):
+ raise Exception("Device %s is not a disk device!", device)
+
+ # Remove the partition table entries
+ if isinstance(layout, str) and layout.lower() == "remove":
+ LOG.debug("Instructed to remove partition table entries")
+ purge_disk(device)
+ return
+
+ LOG.debug("Checking if device layout matches")
+ if check_partition_layout(table_type, device, layout):
+ LOG.debug("Device partitioning layout matches")
+ return True
+
+ LOG.debug("Checking if device is safe to partition")
+ if not overwrite and (is_disk_used(device) or is_filesystem(device)):
+ LOG.debug("Skipping partitioning on configured device %s" % device)
+ return
+
+ LOG.debug("Checking for device size")
+ device_size = get_hdd_size(device)
+
+ LOG.debug("Calculating partition layout")
+ part_definition = get_partition_layout(table_type, device_size, layout)
+ LOG.debug(" Layout is: %s" % part_definition)
+
+ LOG.debug("Creating partition table on %s", device)
+ exec_mkpart(table_type, device, part_definition)
+
+ LOG.debug("Partition table created for %s", device)
+
+
+def lookup_force_flag(fs):
+ """
+    A force flag might be -F or -f; this looks it up.
+ """
+ flags = {'ext': '-F',
+ 'btrfs': '-f',
+ 'xfs': '-f',
+ 'reiserfs': '-f',
+ }
+
+ if 'ext' in fs.lower():
+ fs = 'ext'
+
+ if fs.lower() in flags:
+ return flags[fs]
+
+ LOG.warn("Force flag for %s is unknown." % fs)
+ return ''
+
+
+def mkfs(fs_cfg):
+ """
+ Create a file system on the device.
+
+ label: defines the label to use on the device
+ fs_cfg: defines how the filesystem is to look
+ The following values are required generally:
+ device: which device or cloud defined default_device
+ filesystem: which file system type
+ overwrite: indiscriminately create the file system
+ partition: when device does not define a partition,
+ setting this to a number will mean
+ device + partition. When set to 'auto', the
+ first free device or the first device which
+ matches both label and type will be used.
+
+ 'any' means the first filesystem that matches
+ on the device.
+
+ When 'cmd' is provided then no other parameter is required.
+ """
+ label = fs_cfg.get('label')
+ device = fs_cfg.get('device')
+ partition = str(fs_cfg.get('partition', 'any'))
+ fs_type = fs_cfg.get('filesystem')
+ fs_cmd = fs_cfg.get('cmd', [])
+ fs_opts = fs_cfg.get('extra_opts', [])
+ fs_replace = fs_cfg.get('replace_fs', False)
+ overwrite = fs_cfg.get('overwrite', False)
+
+ # This allows you to define the default ephemeral or swap
+ LOG.debug("Checking %s against default devices", device)
+
+ if not partition or partition.isdigit():
+ # Handle manual definition of partition
+ if partition.isdigit():
+ device = "%s%s" % (device, partition)
+ LOG.debug("Manual request of partition %s for %s",
+ partition, device)
+
+ # Check to see if the fs already exists
+ LOG.debug("Checking device %s", device)
+ check_label, check_fstype, _ = check_fs(device)
+ LOG.debug("Device %s has %s %s", device, check_label, check_fstype)
+
+ if check_label == label and check_fstype == fs_type:
+ LOG.debug("Existing file system found at %s", device)
+
+ if not overwrite:
+ LOG.debug("Device %s has required file system", device)
+ return
+ else:
+ LOG.warn("Destroying filesystem on %s", device)
+
+ else:
+ LOG.debug("Device %s is cleared for formating", device)
+
+ elif partition and str(partition).lower() in ('auto', 'any'):
+ # For auto devices, we match if the filesystem does exist
+ odevice = device
+ LOG.debug("Identifying device to create %s filesytem on", label)
+
+        # 'any' means pick the first match on the device with matching fs_type
+ label_match = True
+ if partition.lower() == 'any':
+ label_match = False
+
+ device, reuse = find_device_node(device, fs_type=fs_type, label=label,
+ label_match=label_match,
+ replace_fs=fs_replace)
+ LOG.debug("Automatic device for %s identified as %s", odevice, device)
+
+ if reuse:
+ LOG.debug("Found filesystem match, skipping formating.")
+ return
+
+ if not reuse and fs_replace and device:
+ LOG.debug("Replacing file system on %s as instructed." % device)
+
+ if not device:
+ LOG.debug("No device aviable that matches request. "
+ "Skipping fs creation for %s", fs_cfg)
+ return
+ elif not partition or str(partition).lower() == 'none':
+ LOG.debug("Using the raw device to place filesystem %s on" % label)
+
+ else:
+ LOG.debug("Error in device identification handling.")
+ return
+
+ LOG.debug("File system %s will be created on %s", label, device)
+
+ # Make sure the device is defined
+ if not device:
+ LOG.warn("Device is not known: %s", device)
+ return
+
+ # Check that we can create the FS
+ if not (fs_type or fs_cmd):
+ raise Exception("No way to create filesystem '%s'. fs_type or fs_cmd "
+ "must be set.", label)
+
+ # Create the commands
+ if fs_cmd:
+ fs_cmd = fs_cfg['cmd'] % {'label': label,
+ 'filesystem': fs_type,
+ 'device': device,
+ }
+ else:
+ # Find the mkfs command
+ mkfs_cmd = util.which("mkfs.%s" % fs_type)
+ if not mkfs_cmd:
+ mkfs_cmd = util.which("mk%s" % fs_type)
+
+ if not mkfs_cmd:
+ LOG.warn("Cannot create fstype '%s'. No mkfs.%s command", fs_type,
+ fs_type)
+ return
+
+ fs_cmd = [mkfs_cmd, device]
+
+        if label:
+            fs_cmd.extend(["-L", label])
+
+        # File systems that support the -F flag
+        if overwrite or device_type(device) == "disk":
+            fs_cmd.append(lookup_force_flag(fs_type))
+
+    # Add the extra FS options
+ if fs_opts:
+ fs_cmd.extend(fs_opts)
+
+ LOG.debug("Creating file system %s on %s", label, device)
+ LOG.debug(" Using cmd: %s", " ".join(fs_cmd))
+ try:
+ util.subp(fs_cmd)
+ except Exception as e:
+ raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e))
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index 6b864fda..e92cba4a 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -54,7 +54,7 @@ def handle(_name, cfg, cloud, log, args):
'datasource': str(cloud.datasource),
}
util.multi_log("%s\n" % (templater.render_string(msg_in, subs)),
- console=False, stderr=True)
+ console=False, stderr=True, log=log)
except Exception:
util.logexc(log, "Failed to render final message template")
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 2d54aabf..0dd92a46 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -32,6 +32,7 @@ frequency = PER_ALWAYS
DEFAULT_CONFIG = {
'mode': 'auto',
'devices': ['/'],
+ 'ignore_growroot_disabled': False,
}
@@ -251,6 +252,12 @@ def handle(_name, cfg, _cloud, log, _args):
log.debug("growpart disabled: mode=%s" % mode)
return
+ if util.is_false(mycfg.get('ignore_growroot_disabled', False)):
+ if os.path.isfile("/etc/growroot-disabled"):
+ log.debug("growpart disabled: /etc/growroot-disabled exists")
+ log.debug("use ignore_growroot_disabled to ignore")
+ return
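+    # Illustrative cloud-config to bypass the check (values hypothetical):
+    #   growpart:
+    #     mode: auto
+    #     ignore_growroot_disabled: true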
+
devices = util.get_cfg_option_list(cfg, "devices", ["/"])
if not len(devices):
log.debug("growpart: empty device list")
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 390ba711..84ec928f 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -20,6 +20,8 @@
from string import whitespace # pylint: disable=W0402
+import logging
+import os.path
import re
from cloudinit import type_utils
@@ -31,6 +33,8 @@ SHORTNAME = re.compile(SHORTNAME_FILTER)
WS = re.compile("[%s]+" % (whitespace))
FSTAB_PATH = "/etc/fstab"
+LOG = logging.getLogger(__name__)
+
def is_mdname(name):
# return true if this is a metadata service name
@@ -44,6 +48,33 @@ def is_mdname(name):
return False
+def sanitize_devname(startname, transformer, log):
+ log.debug("Attempting to determine the real name of %s", startname)
+
+ # workaround, allow user to specify 'ephemeral'
+ # rather than more ec2 correct 'ephemeral0'
+ devname = startname
+ if devname == "ephemeral":
+ devname = "ephemeral0"
+ log.debug("Adjusted mount option from ephemeral to ephemeral0")
+
+ (blockdev, part) = util.expand_dotted_devname(devname)
+
+ if is_mdname(blockdev):
+ orig = blockdev
+ blockdev = transformer(blockdev)
+ if not blockdev:
+ return None
+ if not blockdev.startswith("/"):
+ blockdev = "/dev/%s" % blockdev
+ log.debug("Mapped metadata name %s to %s", orig, blockdev)
+ else:
+ if SHORTNAME.match(startname):
+ blockdev = "/dev/%s" % blockdev
+
+ return devnode_for_dev_part(blockdev, part)
+
+
def handle(_name, cfg, cloud, log, _args):
# fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno
defvals = [None, None, "auto", "defaults,nobootwait", "0", "2"]
@@ -64,32 +95,15 @@ def handle(_name, cfg, cloud, log, _args):
(i + 1), type_utils.obj_name(cfgmnt[i]))
continue
- startname = str(cfgmnt[i][0])
- log.debug("Attempting to determine the real name of %s", startname)
-
- # workaround, allow user to specify 'ephemeral'
- # rather than more ec2 correct 'ephemeral0'
- if startname == "ephemeral":
- cfgmnt[i][0] = "ephemeral0"
- log.debug(("Adjusted mount option %s "
- "name from ephemeral to ephemeral0"), (i + 1))
-
- if is_mdname(startname):
- newname = cloud.device_name_to_device(startname)
- if not newname:
- log.debug("Ignoring nonexistant named mount %s", startname)
- cfgmnt[i][1] = None
- else:
- renamed = newname
- if not newname.startswith("/"):
- renamed = "/dev/%s" % newname
- cfgmnt[i][0] = renamed
- log.debug("Mapped metadata name %s to %s", startname, renamed)
- else:
- if SHORTNAME.match(startname):
- renamed = "/dev/%s" % startname
- log.debug("Mapped shortname name %s to %s", startname, renamed)
- cfgmnt[i][0] = renamed
+ start = str(cfgmnt[i][0])
+ sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
+ if sanitized is None:
+ log.debug("Ignorming nonexistant named mount %s", start)
+ continue
+
+ if sanitized != start:
+ log.debug("changed %s => %s" % (start, sanitized))
+ cfgmnt[i][0] = sanitized
# in case the user did not quote a field (likely fs-freq, fs_passno)
# but do not convert None to 'None' (LP: #898365)
@@ -118,17 +132,14 @@ def handle(_name, cfg, cloud, log, _args):
# for each of the "default" mounts, add them only if no other
# entry has the same device name
for defmnt in defmnts:
- startname = defmnt[0]
- devname = cloud.device_name_to_device(startname)
- if devname is None:
- log.debug("Ignoring nonexistant named default mount %s", startname)
+ start = defmnt[0]
+ sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
+ if sanitized is None:
+ log.debug("Ignoring nonexistant default named mount %s", start)
continue
- if devname.startswith("/"):
- defmnt[0] = devname
- else:
- defmnt[0] = "/dev/%s" % devname
-
- log.debug("Mapped default device %s to %s", startname, defmnt[0])
+ if sanitized != start:
+ log.debug("changed default device %s => %s" % (start, sanitized))
+ defmnt[0] = sanitized
cfgmnt_has = False
for cfgm in cfgmnt:
@@ -138,7 +149,7 @@ def handle(_name, cfg, cloud, log, _args):
if cfgmnt_has:
log.debug(("Not including %s, already"
- " previously included"), startname)
+ " previously included"), start)
continue
cfgmnt.append(defmnt)
@@ -198,3 +209,53 @@ def handle(_name, cfg, cloud, log, _args):
util.subp(("mount", "-a"))
except:
util.logexc(log, "Activating mounts via 'mount -a' failed")
+
+
+def devnode_for_dev_part(device, partition):
+ """
+    Find the name of the partition. While this might seem rather
+    straightforward, it's not, since some devices are '<device><partition>'
+ while others are '<device>p<partition>'. For example, /dev/xvda3 on EC2
+ will present as /dev/xvda3p1 for the first partition since /dev/xvda3 is
+ a block device.
+ """
+ if not os.path.exists(device):
+ return None
+
+ if not partition:
+ return device
+
+ short_name = os.path.basename(device)
+ sys_path = "/sys/block/%s" % short_name
+
+ if not os.path.exists(sys_path):
+ LOG.debug("did not find entry for %s in /sys/block", short_name)
+ return None
+
+ sys_long_path = sys_path + "/" + short_name
+
+ if partition is not None:
+ partition = str(partition)
+
+    if partition is None:
+        valid_mappings = [sys_long_path + "1",
+                          sys_long_path + "p1"]
+ elif partition != "0":
+ valid_mappings = [sys_long_path + "%s" % partition,
+ sys_long_path + "p%s" % partition]
+ else:
+ valid_mappings = []
+
+ for cdisk in valid_mappings:
+ if not os.path.exists(cdisk):
+ continue
+
+ dev_path = "/dev/%s" % os.path.basename(cdisk)
+ if os.path.exists(dev_path):
+ return dev_path
+
+ if partition is None or partition == "0":
+ return device
+
+ LOG.debug("Did not fine partition %s for device %s", partition, device)
+ return None
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
new file mode 100644
index 00000000..22a31f29
--- /dev/null
+++ b/cloudinit/config/cc_seed_random.py
@@ -0,0 +1,61 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2013 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import base64
+from StringIO import StringIO
+
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
+
+frequency = PER_INSTANCE
+
+
+def _decode(data, encoding=None):
+ if not data:
+ return ''
+ if not encoding or encoding.lower() in ['raw']:
+ return data
+ elif encoding.lower() in ['base64', 'b64']:
+ return base64.b64decode(data)
+ elif encoding.lower() in ['gzip', 'gz']:
+ return util.decomp_gzip(data, quiet=False)
+ else:
+ raise IOError("Unknown random_seed encoding: %s" % (encoding))
+
+
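+# An illustrative cloud-config snippet consumed by handle() below
+# (the seed value here is hypothetical):
+#
+#   random_seed:
+#     file: /dev/urandom
+#     data: bXktcmFuZG9tLXNlZWQ=
+#     encoding: base64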
+def handle(name, cfg, cloud, log, _args):
+ if not cfg or "random_seed" not in cfg:
+ log.debug(("Skipping module named %s, "
+ "no 'random_seed' configuration found"), name)
+ return
+
+ my_cfg = cfg['random_seed']
+ seed_path = my_cfg.get('file', '/dev/urandom')
+ seed_buf = StringIO()
+ seed_buf.write(_decode(my_cfg.get('data', ''),
+ encoding=my_cfg.get('encoding')))
+
+ metadata = cloud.datasource.metadata
+ if metadata and 'random_seed' in metadata:
+ seed_buf.write(metadata['random_seed'])
+
+ seed_data = seed_buf.getvalue()
+ if len(seed_data):
+ log.debug("%s: adding %s bytes of random seed entrophy to %s", name,
+ len(seed_data), seed_path)
+ util.append_file(seed_path, seed_data)
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index c38bcea2..be8083db 100644
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -63,7 +63,7 @@ def _is_printable_key(entry):
def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
prefix='ci-info: '):
if not key_entries:
- message = ("%sno authorized ssh keys fingerprints found for user %s."
+ message = ("%sno authorized ssh keys fingerprints found for user %s.\n"
% (prefix, user))
util.multi_log(message)
return
diff --git a/cloudinit/distros/baserock.py b/cloudinit/distros/baserock.py
new file mode 100644
index 00000000..fb9ed92b
--- /dev/null
+++ b/cloudinit/distros/baserock.py
@@ -0,0 +1,127 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit import util
+
+from cloudinit.distros.parsers.hostname import HostnameConf
+
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+APT_GET_COMMAND = ('apt-get', '--option=Dpkg::Options::=--force-confold',
+ '--option=Dpkg::options::=--force-unsafe-io',
+ '--assume-yes', '--quiet')
+
+
+class Distro(distros.Distro):
+ hostname_conf_fn = "/etc/hostname"
+ network_conf_fn = "/etc/network/interfaces"
+
+ def __init__(self, name, cfg, paths):
+ distros.Distro.__init__(self, name, cfg, paths)
+ # This will be used to restrict certain
+        # calls from repeatedly happening (when they
+ # should only happen say once per instance...)
+ self._runner = helpers.Runners(paths)
+ self.osfamily = 'debian'
+
+ def apply_locale(self, locale, out_fn=None):
+ pass
+
+ def install_packages(self, pkglist):
+ pass
+
+ def _write_network(self, settings):
+ util.write_file(self.network_conf_fn, settings)
+ return ['all']
+
+ def _bring_up_interfaces(self, device_names):
+ use_all = False
+ for d in device_names:
+ if d == 'all':
+ use_all = True
+ if use_all:
+ return distros.Distro._bring_up_interface(self, '--all')
+ else:
+ return distros.Distro._bring_up_interfaces(self, device_names)
+
+ def _select_hostname(self, hostname, fqdn):
+ # Prefer the short hostname over the long
+ # fully qualified domain name
+ if not hostname:
+ return fqdn
+ return hostname
+
+ def _write_hostname(self, your_hostname, out_fn):
+ conf = None
+ try:
+ # Try to update the previous one
+ # so lets see if we can read it first.
+ conf = self._read_hostname_conf(out_fn)
+ except IOError:
+ pass
+ if not conf:
+ conf = HostnameConf('')
+ conf.set_hostname(your_hostname)
+ util.write_file(out_fn, str(conf), 0644)
+
+ def _read_system_hostname(self):
+ sys_hostname = self._read_hostname(self.hostname_conf_fn)
+ return (self.hostname_conf_fn, sys_hostname)
+
+ def _read_hostname_conf(self, filename):
+ conf = HostnameConf(util.load_file(filename))
+ conf.parse()
+ return conf
+
+ def _read_hostname(self, filename, default=None):
+ hostname = None
+ try:
+ conf = self._read_hostname_conf(filename)
+ hostname = conf.hostname
+ except IOError:
+ pass
+ if not hostname:
+ return default
+ return hostname
+
+ def _get_localhost_ip(self):
+ # Note: http://www.leonardoborda.com/blog/127-0-1-1-ubuntu-debian/
+ return "127.0.1.1"
+
+ def set_timezone(self, tz):
+ pass
+
+ def package_command(self, command, args=None, pkgs=None):
+ pass
+
+ def update_package_sources(self):
+ pass
+
+ def get_primary_arch(self):
+ pass
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 1c46efde..e5eac6a7 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -292,11 +292,16 @@ class ContentHandlers(object):
def is_registered(self, content_type):
return content_type in self.registered
- def register(self, mod, initialized=False):
+ def register(self, mod, initialized=False, overwrite=True):
types = set()
for t in mod.list_types():
+ if overwrite:
+ types.add(t)
+ else:
+ if not self.is_registered(t):
+ types.add(t)
+ for t in types:
self.registered[t] = mod
- types.add(t)
if initialized and mod not in self.initialized:
self.initialized.append(mod)
return types
@@ -310,15 +315,6 @@ class ContentHandlers(object):
def iteritems(self):
return self.registered.iteritems()
- def register_defaults(self, defs):
- registered = set()
- for mod in defs:
- for t in mod.list_types():
- if not self.is_registered(t):
- self.registered[t] = mod
- registered.add(t)
- return registered
-
class Paths(object):
def __init__(self, path_cfgs, ds=None):
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 9f6badae..5df7f557 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -31,6 +31,7 @@ CFG_BUILTIN = {
'datasource_list': [
'NoCloud',
'ConfigDrive',
+ 'OpenNebula',
'Azure',
'AltCloud',
'OVF',
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 66d7728b..8321dee0 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -44,8 +44,21 @@ BUILTIN_DS_CONFIG = {
'policy': True,
'command': BOUNCE_COMMAND,
'hostname_command': 'hostname',
- }
+ },
+ 'disk_aliases': {'ephemeral0': '/dev/sdb'},
}
+
+BUILTIN_CLOUD_CONFIG = {
+ 'disk_setup': {
+ 'ephemeral0': {'table_type': 'mbr',
+ 'layout': True,
+ 'overwrite': False}
+ },
+ 'fs_setup': [{'filesystem': 'ext4',
+ 'device': 'ephemeral0.1',
+ 'replace_fs': 'ntfs'}]
+}
+
DS_CFG_PATH = ['datasource', DS_NAME]
@@ -94,7 +107,7 @@ class DataSourceAzureNet(sources.DataSource):
(md, self.userdata_raw, cfg, files) = ret
self.seed = cdev
self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
- self.cfg = cfg
+ self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
found = cdev
LOG.debug("found datasource in %s", cdev)
@@ -106,9 +119,14 @@ class DataSourceAzureNet(sources.DataSource):
if found == ddir:
LOG.debug("using files cached in %s", ddir)
+ # azure / hyper-v provides random data here
+ seed = util.load_file("/sys/firmware/acpi/tables/OEM0", quiet=True)
+ if seed:
+ self.metadata['random_seed'] = seed
+
# now update ds_cfg to reflect contents pass in config
- usercfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
- self.ds_cfg = util.mergemanydict([usercfg, self.ds_cfg])
+ user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
+ self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
mycfg = self.ds_cfg
# walinux agent writes files world readable, but expects
@@ -156,9 +174,11 @@ class DataSourceAzureNet(sources.DataSource):
pubkeys = pubkeys_from_crt_files(fp_files)
self.metadata['public-keys'] = pubkeys
-
return True
+ def device_name_to_device(self, name):
+ return self.ds_cfg['disk_aliases'].get(name)
+
def get_config_obj(self):
return self.cfg
@@ -344,7 +364,7 @@ def read_azure_ovf(contents):
try:
dom = minidom.parseString(contents)
except Exception as e:
- raise NonAzureDataSource("invalid xml: %s" % e)
+ raise BrokenAzureDataSource("invalid xml: %s" % e)
results = find_child(dom.documentElement,
lambda n: n.localName == "ProvisioningSection")
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 835f2a9a..4f437244 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -18,6 +18,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import base64
import json
import os
@@ -41,6 +42,25 @@ DEFAULT_METADATA = {
VALID_DSMODES = ("local", "net", "pass", "disabled")
+class ConfigDriveHelper(object):
+ def __init__(self, distro):
+ self.distro = distro
+
+ def on_first_boot(self, data):
+ if not data:
+ data = {}
+ if 'network_config' in data:
+ LOG.debug("Updating network interfaces from config drive")
+ self.distro.apply_network(data['network_config'])
+ files = data.get('files')
+ if files:
+ LOG.debug("Writing %s injected files", len(files))
+ try:
+ write_files(files)
+ except IOError:
+ util.logexc(LOG, "Failed writing files")
+
+
class DataSourceConfigDrive(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -49,6 +69,7 @@ class DataSourceConfigDrive(sources.DataSource):
self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
self.version = None
self.ec2_metadata = None
+ self.helper = ConfigDriveHelper(distro)
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -187,20 +208,8 @@ class DataSourceConfigDrive(sources.DataSource):
# instance-id
prev_iid = get_previous_iid(self.paths)
cur_iid = md['instance-id']
-
- if ('network_config' in results and self.dsmode == "local" and
- prev_iid != cur_iid):
- LOG.debug("Updating network interfaces from config drive (%s)",
- dsmode)
- self.distro.apply_network(results['network_config'])
-
- # file writing occurs in local mode (to be as early as possible)
- if self.dsmode == "local" and prev_iid != cur_iid and results['files']:
- LOG.debug("writing injected files")
- try:
- write_files(results['files'])
- except:
- util.logexc(LOG, "Failed writing files")
+ if prev_iid != cur_iid and self.dsmode == "local":
+ self.helper.on_first_boot(results)
# dsmode != self.dsmode here if:
# * dsmode = "pass", pass means it should only copy files and then
@@ -338,6 +347,13 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
except KeyError:
raise BrokenConfigDriveDir("No uuid entry in metadata")
+ if 'random_seed' in results['metadata']:
+ random_seed = results['metadata']['random_seed']
+ try:
+ results['metadata']['random_seed'] = base64.b64decode(random_seed)
+ except (ValueError, TypeError) as exc:
+ raise BrokenConfigDriveDir("Badly formatted random_seed: %s" % exc)
+
def read_content_path(item):
# do not use os.path.join here, as content_path starts with /
cpath = os.path.sep.join((source_dir, "openstack",
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
new file mode 100644
index 00000000..07dc25ff
--- /dev/null
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -0,0 +1,442 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Yahoo! Inc.
+# Copyright (C) 2012-2013 CERIT Scientific Cloud
+# Copyright (C) 2012-2013 OpenNebula.org
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+# Author: Vlastimil Holer <xholer@mail.muni.cz>
+# Author: Javier Fontan <jfontan@opennebula.org>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import pwd
+import re
+import string # pylint: disable=W0402
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+DEFAULT_IID = "iid-dsopennebula"
+DEFAULT_MODE = 'net'
+DEFAULT_PARSEUSER = 'nobody'
+CONTEXT_DISK_FILES = ["context.sh"]
+VALID_DSMODES = ("local", "net", "disabled")
+
+
+class DataSourceOpenNebula(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.dsmode = 'local'
+ self.seed = None
+ self.seed_dir = os.path.join(paths.seed_dir, 'opennebula')
+
+ def __str__(self):
+ root = sources.DataSource.__str__(self)
+ return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
+
+ def get_data(self):
+ defaults = {"instance-id": DEFAULT_IID}
+ results = None
+ seed = None
+
+ # decide parseuser for context.sh shell reader
+ parseuser = DEFAULT_PARSEUSER
+ if 'parseuser' in self.ds_cfg:
+ parseuser = self.ds_cfg.get('parseuser')
+
+ candidates = [self.seed_dir]
+ candidates.extend(find_candidate_devs())
+ for cdev in candidates:
+ try:
+                if os.path.isdir(cdev):
+ results = read_context_disk_dir(cdev, asuser=parseuser)
+ elif cdev.startswith("/dev"):
+ results = util.mount_cb(cdev, read_context_disk_dir,
+ data=parseuser)
+ except NonContextDiskDir:
+ continue
+ except BrokenContextDiskDir as exc:
+ raise exc
+ except util.MountFailedError:
+ LOG.warn("%s was not mountable" % cdev)
+
+ if results:
+ seed = cdev
+ LOG.debug("found datasource in %s", cdev)
+ break
+
+ if not seed:
+ return False
+
+ # merge fetched metadata with datasource defaults
+ md = results['metadata']
+ md = util.mergemanydict([md, defaults])
+
+ # check for valid user specified dsmode
+ user_dsmode = results['metadata'].get('DSMODE', None)
+ if user_dsmode not in VALID_DSMODES + (None,):
+ LOG.warn("user specified invalid mode: %s", user_dsmode)
+ user_dsmode = None
+
+ # decide dsmode
+ if user_dsmode:
+ dsmode = user_dsmode
+ elif self.ds_cfg.get('dsmode'):
+ dsmode = self.ds_cfg.get('dsmode')
+ else:
+ dsmode = DEFAULT_MODE
+
+ if dsmode == "disabled":
+ # most likely user specified
+ return False
+
+ # apply static network configuration only in 'local' dsmode
+ if ('network-interfaces' in results and self.dsmode == "local"):
+ LOG.debug("Updating network interfaces from %s", self)
+ self.distro.apply_network(results['network-interfaces'])
+
+ if dsmode != self.dsmode:
+ LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
+ return False
+
+ self.seed = seed
+ self.metadata = md
+ self.userdata_raw = results.get('userdata')
+ return True
+
+ def get_hostname(self, fqdn=False, resolve_ip=None):
+ if resolve_ip is None:
+ if self.dsmode == 'net':
+ resolve_ip = True
+ else:
+ resolve_ip = False
+ return sources.DataSource.get_hostname(self, fqdn, resolve_ip)
+
+
+class DataSourceOpenNebulaNet(DataSourceOpenNebula):
+ def __init__(self, sys_cfg, distro, paths):
+ DataSourceOpenNebula.__init__(self, sys_cfg, distro, paths)
+ self.dsmode = 'net'
+
+
+class NonContextDiskDir(Exception):
+ pass
+
+
+class BrokenContextDiskDir(Exception):
+ pass
+
+
+class OpenNebulaNetwork(object):
+ REG_DEV_MAC = re.compile(
+ r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?',
+ re.MULTILINE | re.DOTALL)
+
+ def __init__(self, ip, context):
+ self.ip = ip
+ self.context = context
+ self.ifaces = self.get_ifaces()
+
+ def get_ifaces(self):
+ return self.REG_DEV_MAC.findall(self.ip)
+
+ def mac2ip(self, mac):
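+        # e.g. mac2ip('02:00:0a:00:00:05') -> ['10', '0', '0', '5']; the
+        # context network encodes the IP in the MAC's low four octets.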
+ components = mac.split(':')[2:]
+ return [str(int(c, 16)) for c in components]
+
+ def get_ip(self, dev, components):
+ var_name = dev.upper() + '_IP'
+ if var_name in self.context:
+ return self.context[var_name]
+ else:
+ return '.'.join(components)
+
+ def get_mask(self, dev):
+ var_name = dev.upper() + '_MASK'
+ if var_name in self.context:
+ return self.context[var_name]
+ else:
+ return '255.255.255.0'
+
+ def get_network(self, dev, components):
+ var_name = dev.upper() + '_NETWORK'
+ if var_name in self.context:
+ return self.context[var_name]
+ else:
+ return '.'.join(components[:-1]) + '.0'
+
+ def get_gateway(self, dev):
+ var_name = dev.upper() + '_GATEWAY'
+ if var_name in self.context:
+ return self.context[var_name]
+ else:
+ return None
+
+ def get_dns(self, dev):
+ var_name = dev.upper() + '_DNS'
+ if var_name in self.context:
+ return self.context[var_name]
+ else:
+ return None
+
+ def get_domain(self, dev):
+ var_name = dev.upper() + '_DOMAIN'
+ if var_name in self.context:
+ return self.context[var_name]
+ else:
+ return None
+
+ def gen_conf(self):
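+        # For one interface (eth0, MAC 02:00:0a:00:00:05) with no context
+        # overrides, this emits roughly:
+        #   auto lo
+        #   iface lo inet loopback
+        #
+        #   auto eth0
+        #   iface eth0 inet static
+        #     address 10.0.0.5
+        #     network 10.0.0.0
+        #     netmask 255.255.255.0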
+ global_dns = []
+ if 'DNS' in self.context:
+ global_dns.append(self.context['DNS'])
+
+ conf = []
+ conf.append('auto lo')
+ conf.append('iface lo inet loopback')
+ conf.append('')
+
+ for i in self.ifaces:
+ dev = i[0]
+ mac = i[1]
+ ip_components = self.mac2ip(mac)
+
+ conf.append('auto ' + dev)
+ conf.append('iface ' + dev + ' inet static')
+ conf.append(' address ' + self.get_ip(dev, ip_components))
+ conf.append(' network ' + self.get_network(dev, ip_components))
+ conf.append(' netmask ' + self.get_mask(dev))
+
+ gateway = self.get_gateway(dev)
+ if gateway:
+ conf.append(' gateway ' + gateway)
+
+ domain = self.get_domain(dev)
+ if domain:
+ conf.append(' dns-search ' + domain)
+
+ # add global DNS servers to all interfaces
+ dns = self.get_dns(dev)
+ if global_dns or dns:
+ all_dns = global_dns
+ if dns:
+ all_dns.append(dns)
+ conf.append(' dns-nameservers ' + ' '.join(all_dns))
+
+ conf.append('')
+
+ return "\n".join(conf)
+
+
+def find_candidate_devs():
+ """
+ Return a list of devices that may contain the context disk.
+ """
+ combined = []
+ for f in ('LABEL=CONTEXT', 'LABEL=CDROM', 'TYPE=iso9660'):
+ devs = util.find_devs_with(f)
+ devs.sort()
+ for d in devs:
+ if d not in combined:
+ combined.append(d)
+
+ return combined
+
+
+def switch_user_cmd(user):
+ return ['sudo', '-u', user]
+
+
+def parse_shell_config(content, keylist=None, bash=None, asuser=None,
+ switch_user_cb=None):
+
+ if isinstance(bash, str):
+ bash = [bash]
+ elif bash is None:
+ bash = ['bash', '-e']
+
+ if switch_user_cb is None:
+ switch_user_cb = switch_user_cmd
+
+ # allvars expands to all existing variables by using '${!x*}' notation
+ # where x is lower or upper case letters or '_'
+ allvars = ["${!%s*}" % x for x in string.letters + "_"]
+
+ keylist_in = keylist
+ if keylist is None:
+ keylist = allvars
+ keylist_in = []
+
+ setup = '\n'.join(('__v="";', '',))
+
+ def varprinter(vlist):
+        # output: '_start_\0' followed by 'key=value\0' for each var in vlist
+ return '\n'.join((
+ 'printf "%s\\0" _start_',
+ 'for __v in %s; do' % ' '.join(vlist),
+ ' printf "%s=%s\\0" "$__v" "${!__v}";',
+ 'done',
+ ''
+ ))
+
+ # the rendered 'bcmd' is bash syntax that does
+ # setup: declare variables we use (so they show up in 'all')
+ # varprinter(allvars): print all variables known at beginning
+ # content: execute the provided content
+ # varprinter(keylist): print all variables known after content
+ #
+ # output is then a null terminated array of:
+ # literal '_start_'
+ # key=value (for each preset variable)
+ # literal '_start_'
+ # key=value (for each post set variable)
+ bcmd = ('unset IFS\n' +
+ setup +
+ varprinter(allvars) +
+ '{\n%s\n\n:\n} > /dev/null\n' % content +
+ 'unset IFS\n' +
+ varprinter(keylist) + "\n")
+
+ cmd = []
+ if asuser is not None:
+ cmd = switch_user_cb(asuser)
+
+ cmd.extend(bash)
+
+ (output, _error) = util.subp(cmd, data=bcmd)
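+    # e.g. for content 'FOO="bar"\n', FOO is absent from the first (preset)
+    # dump but present in the second, so the parsing below yields
+    # {'FOO': 'bar'} (assuming FOO was not already set in the environment).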
+
+ # exclude vars in bash that change on their own or that we used
+ excluded = ("RANDOM", "LINENO", "_", "__v")
+ preset = {}
+ ret = {}
+ target = None
+ output = output[0:-1] # remove trailing null
+
+ # go through output. First _start_ is for 'preset', second for 'target'.
+    # Add to target only things that were changed and are not volatile
+ for line in output.split("\x00"):
+ try:
+ (key, val) = line.split("=", 1)
+ if target is preset:
+ target[key] = val
+ elif (key not in excluded and
+ (key in keylist_in or preset.get(key) != val)):
+ ret[key] = val
+ except ValueError:
+ if line != "_start_":
+ raise
+ if target is None:
+ target = preset
+ elif target is preset:
+ target = ret
+
+ return ret
+
+
+def read_context_disk_dir(source_dir, asuser=None):
+ """
+ read_context_disk_dir(source_dir):
+    read source_dir and return a dict with 'metadata' and 'userdata'
+    entries populated. If not a valid dir, raise a NonContextDiskDir
+ """
+ found = {}
+ for af in CONTEXT_DISK_FILES:
+ fn = os.path.join(source_dir, af)
+ if os.path.isfile(fn):
+ found[af] = fn
+
+ if not found:
+ raise NonContextDiskDir("%s: %s" % (source_dir, "no files found"))
+
+ context = {}
+ results = {'userdata': None, 'metadata': {}}
+
+ if "context.sh" in found:
+ if asuser is not None:
+ try:
+ pwd.getpwnam(asuser)
+ except KeyError as e:
+ raise BrokenContextDiskDir("configured user '%s' "
+ "does not exist", asuser)
+ try:
+ with open(os.path.join(source_dir, 'context.sh'), 'r') as f:
+ content = f.read().strip()
+
+ context = parse_shell_config(content, asuser=asuser)
+ except util.ProcessExecutionError as e:
+ raise BrokenContextDiskDir("Error processing context.sh: %s" % (e))
+ except IOError as e:
+ raise NonContextDiskDir("Error reading context.sh: %s" % (e))
+ else:
+ raise NonContextDiskDir("Missing context.sh")
+
+ if not context:
+ return results
+
+ results['metadata'] = context
+
+ # process single or multiple SSH keys
+ ssh_key_var = None
+ if "SSH_KEY" in context:
+ ssh_key_var = "SSH_KEY"
+ elif "SSH_PUBLIC_KEY" in context:
+ ssh_key_var = "SSH_PUBLIC_KEY"
+
+ if ssh_key_var:
+ lines = context.get(ssh_key_var).splitlines()
+ results['metadata']['public-keys'] = [l for l in lines
+ if len(l) and not l.startswith("#")]
+
+    # custom hostname -- try hostname, or else let cloud-init
+    # itself create the hostname from the IP address later
+ for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
+ if k in context:
+ results['metadata']['local-hostname'] = context[k]
+ break
+
+ # raw user data
+ if "USER_DATA" in context:
+ results['userdata'] = context["USER_DATA"]
+ elif "USERDATA" in context:
+ results['userdata'] = context["USERDATA"]
+
+ # generate static /etc/network/interfaces
+ # only if there are any required context variables
+ # http://opennebula.org/documentation:rel3.8:cong#network_configuration
+ for k in context.keys():
+ if re.match(r'^ETH\d+_IP$', k):
+ (out, _) = util.subp(['/sbin/ip', 'link'])
+ net = OpenNebulaNetwork(out, context)
+ results['network-interfaces'] = net.gen_conf()
+ break
+
+ return results
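+
+# Hypothetical example: a context.sh that sets HOSTNAME='foo' and
+# USER_DATA='#cloud-config' yields
+# {'metadata': {'HOSTNAME': 'foo', 'USER_DATA': '#cloud-config',
+# 'local-hostname': 'foo'}, 'userdata': '#cloud-config'}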
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceOpenNebula, (sources.DEP_FILESYSTEM, )),
+ (DataSourceOpenNebulaNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index d348d20b..2b4386b7 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -35,8 +35,7 @@ import os
import os.path
import serial
-DEF_TTY_LOC = '/dev/ttyS1'
-DEF_TTY_TIMEOUT = 60
+
LOG = logging.getLogger(__name__)
SMARTOS_ATTRIB_MAP = {
@@ -47,26 +46,66 @@ SMARTOS_ATTRIB_MAP = {
'user-data': ('user-data', False),
'iptables_disable': ('iptables_disable', True),
'motd_sys_info': ('motd_sys_info', True),
+ 'availability_zone': ('region', True),
}
-# These are values which will never be base64 encoded.
-# They come from the cloud platform, not user
-SMARTOS_NO_BASE64 = ['root_authorized_keys', 'motd_sys_info',
- 'iptables_disable']
+DS_NAME = 'SmartOS'
+DS_CFG_PATH = ['datasource', DS_NAME]
+# BUILT-IN DATASOURCE CONFIGURATION
+# The following is the built-in configuration. If the values
+# are not set via the system configuration, then these defaults
+# will be used:
+# serial_device: which serial device to use for the meta-data
+# seed_timeout: how long to wait on the device
+# no_base64_decode: values which are not base64 encoded and
+# are fetched directly from SmartOS, not meta-data values
+# base64_keys: meta-data keys that are delivered in base64
+# base64_all: with the exclusion of no_base64_decode values,
+# treat all meta-data as base64 encoded
+# disk_setup: describes how to partition the ephemeral drive
+# fs_setup: describes how to format the ephemeral drive
+#
+BUILTIN_DS_CONFIG = {
+ 'serial_device': '/dev/ttyS1',
+ 'seed_timeout': 60,
+ 'no_base64_decode': ['root_authorized_keys',
+ 'motd_sys_info',
+ 'iptables_disable'],
+ 'base64_keys': [],
+ 'base64_all': False,
+ 'disk_aliases': {'ephemeral0': '/dev/vdb'},
+}
+
+BUILTIN_CLOUD_CONFIG = {
+ 'disk_setup': {
+ 'ephemeral0': {'table_type': 'mbr',
+ 'layout': False,
+ 'overwrite': False}
+ },
+ 'fs_setup': [{'label': 'ephemeral0',
+ 'filesystem': 'ext3',
+ 'device': 'ephemeral0'}],
+}
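+
+# Hypothetical example: a system configuration entry such as
+# datasource: {SmartOS: {serial_device: '/dev/ttyS2'}}
+# is found via DS_CFG_PATH and, in the mergemanydict() call below,
+# takes precedence over these built-in defaults.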
class DataSourceSmartOS(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'sdc')
self.is_smartdc = None
- self.seed = self.ds_cfg.get("serial_device", DEF_TTY_LOC)
- self.seed_timeout = self.ds_cfg.get("serial_timeout", DEF_TTY_TIMEOUT)
- self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode',
- SMARTOS_NO_BASE64)
- self.b64_keys = self.ds_cfg.get('base64_keys', [])
- self.b64_all = self.ds_cfg.get('base64_all', False)
+ self.ds_cfg = util.mergemanydict([
+ self.ds_cfg,
+ util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
+ BUILTIN_DS_CONFIG])
+
+ self.metadata = {}
+ self.cfg = BUILTIN_CLOUD_CONFIG
+
+ self.seed = self.ds_cfg.get("serial_device")
+ self.seed_timeout = self.ds_cfg.get("serial_timeout")
+ self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
+ self.b64_keys = self.ds_cfg.get('base64_keys')
+ self.b64_all = self.ds_cfg.get('base64_all')
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -79,7 +118,6 @@ class DataSourceSmartOS(sources.DataSource):
if not os.path.exists(self.seed):
LOG.debug("Host does not appear to be on SmartOS")
return False
- self.seed = self.seed
dmi_info = dmi_data()
if dmi_info is False:
@@ -114,10 +152,16 @@ class DataSourceSmartOS(sources.DataSource):
elif md['user-script']:
ud = md['user-script']
- self.metadata = md
+ self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
return True
+ def device_name_to_device(self, name):
+ return self.ds_cfg['disk_aliases'].get(name)
+
+ def get_config_obj(self):
+ return self.cfg
+
def get_instance_id(self):
return self.metadata['instance-id']
@@ -230,7 +274,7 @@ def dmi_data():
except Exception as e:
util.logexc(LOG, "Failed to get system UUID", e)
- return sys_uuid.lower(), sys_type
+ return (sys_uuid.lower().strip(), sys_type.strip())
# Used to match classes to dependencies
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 974c0407..7dc1fbde 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -53,9 +53,16 @@ class DataSource(object):
self.userdata = None
self.metadata = None
self.userdata_raw = None
+
+ # find the datasource config name.
+ # remove 'DataSource' from the front of the classname, and 'Net' from
+ # the end. Both Foo and FooNet sources expect config in
+ # cfg['datasource']['Foo']
name = type_utils.obj_name(self)
if name.startswith(DS_PREFIX):
name = name[len(DS_PREFIX):]
+ if name.endswith('Net'):
+ name = name[0:-3]
+
self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
("datasource", name), {})
if not ud_proc:
@@ -144,7 +151,7 @@ class DataSource(object):
return "iid-datasource"
return str(self.metadata['instance-id'])
- def get_hostname(self, fqdn=False):
+ def get_hostname(self, fqdn=False, resolve_ip=False):
defdomain = "localdomain"
defhost = "localhost"
domain = defdomain
@@ -168,7 +175,14 @@ class DataSource(object):
# make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
lhost = self.metadata['local-hostname']
if util.is_ipv4(lhost):
- toks = ["ip-%s" % lhost.replace(".", "-")]
+ toks = []
+ if resolve_ip:
+ toks = util.gethostbyaddr(lhost)
+
+ if toks:
+ toks = str(toks).split('.')
+ else:
+ toks = ["ip-%s" % lhost.replace(".", "-")]
else:
toks = lhost.split(".")
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 3e49e8c5..07c55802 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -375,7 +375,9 @@ class Init(object):
mod = importer.import_module(mod_locs[0])
mod = handlers.fixup_handler(mod)
types = c_handlers.register(mod)
- LOG.debug("Added handler for %s from %s", types, fname)
+ if types:
+ LOG.debug("Added custom handler for %s from %s",
+ types, fname)
except Exception:
util.logexc(LOG, "Failed to register handler from %s",
fname)
@@ -386,10 +388,10 @@ class Init(object):
# Register any other handlers that come from the default set. This
# is done after the cloud-dir handlers so that the cdir modules can
# take over the default user-data handler content-types.
- def_handlers = self._default_userdata_handlers()
- applied_def_handlers = c_handlers.register_defaults(def_handlers)
- if applied_def_handlers:
- LOG.debug("Registered default handlers: %s", applied_def_handlers)
+ for mod in self._default_userdata_handlers():
+ types = c_handlers.register(mod, overwrite=False)
+ if types:
+ LOG.debug("Added default handler for %s from %s", types, mod)
# Form our cloud interface
data = self.cloudify()
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 4a74ba57..9e6e0a73 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -32,6 +32,7 @@ import grp
import gzip
import hashlib
import os
+import os.path
import platform
import pwd
import random
@@ -161,13 +162,13 @@ class SeLinuxGuard(object):
self.recursive = recursive
def __enter__(self):
- if self.selinux:
+ if self.selinux and self.selinux.is_selinux_enabled():
return True
else:
return False
def __exit__(self, excp_type, excp_value, excp_traceback):
- if self.selinux:
+ if self.selinux and self.selinux.is_selinux_enabled():
path = os.path.realpath(os.path.expanduser(self.path))
do_restore = False
try:
@@ -360,11 +361,21 @@ def multi_log(text, console=True, stderr=True,
if stderr:
sys.stderr.write(text)
if console:
- # Don't use the write_file since
- # this might be 'sensitive' info (not debug worthy?)
- with open('/dev/console', 'wb') as wfh:
- wfh.write(text)
- wfh.flush()
+ conpath = "/dev/console"
+ if os.path.exists(conpath):
+ with open(conpath, 'wb') as wfh:
+ wfh.write(text)
+ wfh.flush()
+ else:
+ # A container may lack /dev/console (arguably a container bug). If
+ # it does not exist, then write output to stdout. This will result
+ # in duplicate stderr and stdout messages if stderr was True.
+ #
+ # Even though upstart or systemd might have set up output to go to
+ # /dev/console, the user may have directed output elsewhere via the
+ # cloud-config 'output' option. If /dev/console exists, messages
+ # will still get there.
+ sys.stdout.write(text)
if log:
if text[-1] == "\n":
log.log(log_level, text[:-1])
@@ -955,6 +966,13 @@ def get_hostname():
return hostname
+def gethostbyaddr(ip):
+ try:
+ return socket.gethostbyaddr(ip)[0]
+ except socket.herror:
+ return None
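+# e.g. gethostbyaddr("10.0.0.3") returns a name such as
+# "host.example.com" if reverse DNS resolves, or None on herror.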
+
+
def is_resolvable_url(url):
"""determine if this url is resolvable (existing or ip)."""
return (is_resolvable(urlparse.urlparse(url).hostname))
@@ -1791,17 +1809,29 @@ def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False):
ret = func(*args, **kwargs)
finally:
delta = time.time() - start
+ udelta = None
if ustart is not None:
try:
udelta = float(uptime()) - ustart
except ValueError:
- udelta = "N/A"
+ pass
tmsg = " took %0.3f seconds" % delta
if get_uptime:
- tmsg += "(%0.2f)" % udelta
+ if isinstance(udelta, float):
+ tmsg += " (%0.2f)" % udelta
+ else:
+ tmsg += " (N/A)"
try:
logfunc(msg + tmsg)
except:
pass
return ret
+
+
+def expand_dotted_devname(dotted):
+ toks = dotted.rsplit(".", 1)
+ if len(toks) > 1:
+ return toks
+ else:
+ return (dotted, None)
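+
+# e.g. (dev, part) = expand_dotted_devname("ephemeral0.1") gives
+# dev == "ephemeral0" and part == "1"; with no dot, part is None.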
diff --git a/config/cloud.cfg b/config/cloud.cfg
index 9d2adf1d..8504e682 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -1,64 +1,7 @@
-# configure where output will go
-# 'output' entry is a dict with 'init', 'config', 'final' or 'all'
-# entries. Each one defines where
-# cloud-init, cloud-config, cloud-config-final or all output will go
-# each entry in the dict can be a string, list or dict.
-# if it is a string, it refers to stdout and stderr
-# if it is a list, entry 0 is stdout, entry 1 is stderr
-# if it is a dict, it is expected to have 'output' and 'error' fields
-# default is to write to console only
-# the special entry "&1" for an error means "same location as stdout"
-# (Note, that '&1' has meaning in yaml, so it must be quoted)
-output:
- init: [ ">> /var/log/init.err", "> /var/log/init.log" ]
- config: [ ">> /var/log/config.err", "> /var/log/config.log" ]
- final: [ ">> /var/log/final.err", "> /var/log/final.log" ]
-
-#logging user and file
-def_log_file: /var/log/cloud-init-logging.log
-syslog_fix_perms: root:root
-
-runcmd:
- - /bin/echo 'Testing runcmd from cloud.cfg' > /root/file1.txt
-
-datasource:
- # ConfigDrive for Openstack
- Ec2:
- # timeout: the timeout value for a request at metadata service
- timeout : 50
- # The length in seconds to wait before giving up on the metadata
- # service. The actual total wait could be up to
- # len(resolvable_metadata_urls)*timeout
- max_wait : 120
- metadata_urls:
- - http://192.168.24.247:8773/services/Cloud
-
-
-
-# The top level settings are used as module
-# and system configuration.
-
-# A set of users which may be applied and/or used by various modules
-# when a 'default' entry is found it will reference the 'default_user'
-# from the distro configuration specified below
-#users:
-# - default
-# will get a message to login instead as the above $user (ubuntu)
-disable_root: false
-
-# This will cause the set+update hostname module to not operate (if true)
-preserve_hostname: false
-
-# Example datasource config
-# datasource:
-# Ec2:
-# metadata_urls: [ 'blah.com' ]
-# timeout: 5 # (defaults to 50 seconds)
-# max_wait: 10 # (defaults to 120 seconds)
-
# The modules that run in the 'init' stage
cloud_init_modules:
- migrator
+ - seed_random
- bootcmd
- write-files
- growpart
@@ -68,25 +11,21 @@ cloud_init_modules:
- update_etc_hosts
- ca-certs
- rsyslog
+# - users-groups
# The modules that run in the 'config' stage
cloud_config_modules:
# Emit the cloud config ready event
# this can be used by upstart jobs for 'start on cloud-config'.
- ssh
- - emit_upstart
+ - disk_setup
- mounts
- ssh-import-id
- set-passwords
- - package-update-upgrade-install
- landscape
- puppet
- - chef
- - salt-minion
- - mcollective
- disable-ec2-metadata
- runcmd
- - byobu
# The modules that run in the 'final' stage
cloud_final_modules:
@@ -100,3 +39,18 @@ cloud_final_modules:
- final-message
- power-state-change
+system_info:
+ # This will affect which distro class gets used
+ distro: baserock
+ # Default user name + that default users groups (if added/used)
+ default_user:
+ name: root
+ lock_passwd: false
+ gecos: Root
+ groups: [root, pulse-access]
+ shell: /bin/bash
+ # Other config here will be given to the distro class and/or path classes
+ paths:
+ cloud_dir: /var/lib/cloud/
+ templates_dir: /etc/cloud/templates/
+ ssh_svcname: ssh
diff --git a/config/cloud.cfg.d/baserock.cfg b/config/cloud.cfg.d/baserock.cfg
new file mode 100644
index 00000000..d0c4b9c4
--- /dev/null
+++ b/config/cloud.cfg.d/baserock.cfg
@@ -0,0 +1,19 @@
+datasource_list: [ NoCloud, ConfigDrive, AltCloud, OVF, MAAS, Ec2, None ]
+
+output:
+ init: [ ">> /var/log/init.err", "> /var/log/init.log" ]
+ config: [ ">> /var/log/config.err", "> /var/log/config.log" ]
+ final: [ ">> /var/log/final.err", "> /var/log/final.log" ]
+
+#logging user and file
+def_log_file: /var/log/cloud-init-logging.log
+syslog_fix_perms: root:root
+
+
+# The top level settings are used as module
+# and system configuration.
+
+# If this is set, 'root' will not be able to ssh in and they
+# will get a message to login instead as the above $user (ubuntu)
+disable_root: false
+
diff --git a/doc/examples/cloud-config-disk-setup.txt b/doc/examples/cloud-config-disk-setup.txt
new file mode 100644
index 00000000..6ad61c33
--- /dev/null
+++ b/doc/examples/cloud-config-disk-setup.txt
@@ -0,0 +1,251 @@
+Cloud-init supports the creation of simple partition tables and file systems
+on devices.
+
+Default disk definitions for AWS
+--------------------------------
+(Not implemented yet, but provided for future documentation)
+
+ disk_setup:
+ ephemeral0:
+ type: 'mbr'
+ layout: True
+ overwrite: False
+
+ fs_setup:
+ - label: None
+ filesystem: ext3
+ device: ephemeral0
+ partition: auto
+
+Default disk definitions for Windows Azure
+------------------------------------------
+
+device_aliases: {'ephemeral0': '/dev/sdb'}
+disk_setup:
+ ephemeral0:
+ type: mbr
+ layout: True
+ overwrite: False
+
+fs_setup:
+ - label: ephemeral0
+ filesystem: ext4
+ device: ephemeral0.1
+ replace_fs: ntfs
+
+
+Default disk definitions for SmartOS
+------------------------------------
+
+device_aliases: {'ephemeral0': '/dev/sdb'}
+disk_setup:
+ ephemeral0:
+ type: mbr
+ layout: False
+ overwrite: False
+
+fs_setup:
+ - label: ephemeral0
+ filesystem: ext3
+ device: ephemeral0.0
+
+Caveat for SmartOS: if the ephemeral disk is not defined, then it will
+ not be automatically added to the mounts.
+
+
+The default definition is used to make sure that the ephemeral storage is
+set up properly.
+
+"disk_setup": disk partitioning
+--------------------------------
+
+The disk_setup directive instructs Cloud-init to partition a disk. The format is:
+
+ disk_setup:
+ ephemeral0:
+ type: 'mbr'
+ layout: 'auto'
+ /dev/xvdh:
+ type: 'mbr'
+ layout:
+ - 33
+ - [33, 82]
+ - 33
+ overwrite: True
+
+The format is a dict of dicts: each key is the name of a device and its
+value defines how to create and lay out the partition(s) on it.
+
+The general format is:
+ disk_setup:
+ <DEVICE>:
+ type: 'mbr'
+ layout: <LAYOUT|BOOL>
+ overwrite: <BOOL>
+
+Where:
+ <DEVICE>: The name of the device. 'ephemeralX' and 'swap' are special
+ values which are specific to the cloud. For these devices
+ Cloud-init will look up what the real device is and then
+ use it.
+
+ For other devices, the kernel device name is used. At this
+ time only simple kernel devices are supported, meaning
+ that device mapper and other targets may not work.
+
+ Note: At this time, there is no handling or setup of
+ device mapper targets.
+
+ type=<TYPE>: Currently the following are supported:
+ 'mbr': the default; sets up an MS-DOS partition table
+
+ Note: At this time only 'mbr' partition tables are allowed.
+ It is anticipated that we'll have GPT as an
+ option in the future, or even "RAID" to create an mdadm
+ RAID.
+
+ layout={...}: The device layout. This is a list of values, giving the
+ percentage of the disk that each partition will take.
+ Valid options are:
+ [<SIZE>, [<SIZE>, <PART_TYPE>]]
+
+ Where <SIZE> is the _percentage_ of the disk to use, while
+ <PART_TYPE> is the numerical value of the partition type.
+
+ The following sets up two partitions: the first has a swap
+ partition type and takes 1/3 of the disk space, and the
+ remainder is used as the second partition.
+ /dev/xvdh:
+ type: 'mbr'
+ layout:
+ - [33,82]
+ - 66
+ overwrite: True
+
+ When layout is "true", a single partition spanning the
+ entire device is created.
+
+ When layout is "false", the device is not partitioned and
+ any existing partitioning is ignored.
+
+ If layout is set to "true" and overwrite is set to "false",
+ it will skip partitioning the device without a failure.
+
+ overwrite=<BOOL>: This describes whether to ride with the safety on
+ and everything holstered.
+
+ 'false' is the default, which means that:
+ 1. The device will be checked for a partition table
+ 2. The device will be checked for a file system
+ 3. If either a partition or file system is found, then
+ the operation will be _skipped_.
+
+ 'true' is cowboy mode. There are no checks and things are
+ done blindly. Use with caution; you can do things you
+ really, really don't want to do.
+
+
+fs_setup: Setup the file system
+-------------------------------
+
+fs_setup describes how the file systems are supposed to look.
+
+ fs_setup:
+ - label: ephemeral0
+ filesystem: 'ext3'
+ device: 'ephemeral0'
+ partition: 'auto'
+ - label: mylabl2
+ filesystem: 'ext4'
+ device: '/dev/xvda1'
+ - special:
+ cmd: mkfs -t %(FILESYSTEM)s -L %(LABEL)s %(DEVICE)s
+ filesystem: 'btrfs'
+ device: '/dev/xvdh'
+
+The general format is:
+ fs_setup:
+ - label: <LABEL>
+ filesystem: <FS_TYPE>
+ device: <DEVICE>
+ partition: <PART_VALUE>
+ overwrite: <OVERWRITE>
+ replace_fs: <FS_TYPE>
+
+Where:
+ <LABEL>: The file system label to be used. If set to None, no label is
+ used.
+
+ <FS_TYPE>: The file system type. It is assumed that there
+ will be a "mkfs.<FS_TYPE>" that behaves like "mkfs". On a standard
+ Ubuntu Cloud Image, this means that you have the option of ext{2,3,4},
+ and vfat by default.
+
+ <DEVICE>: The device name. Special names of 'ephemeralX' or 'swap'
+ are allowed and the actual device is acquired from the cloud datasource.
+ When using 'ephemeralX' (i.e. ephemeral0), make sure to leave the
+ label as 'ephemeralX', otherwise there may be issues with the mounting
+ of the ephemeral storage layer.
+
+ If you define the device as 'ephemeralX.Y' then Y will be interpreted
+ as a partition value. However, ephemeralX.0 is the _same_ as ephemeralX.
+
+ <PART_VALUE>:
+ Partition definitions are overwritten if you use the '<DEVICE>.Y' notation.
+
+ The valid options are:
+ "auto|any": tell cloud-init not to care whether there is a partition
+ or not. Auto will use the first partition that does not contain a
+ file system already. In the absence of a partition table, it will
+ put it directly on the disk.
+
+ "auto": If a file system that matches the specification in terms of
+ label, type and device, then cloud-init will skip the creation of
+ the file system.
+
+ "any": If a file system that matches the file system type and device,
+ then cloud-init will skip the creation of the file system.
+
+ Devices are selected based on first-detected, starting with partitions
+ and then the raw disk. Consider the following:
+ NAME FSTYPE LABEL
+ xvdb
+ |-xvdb1 ext4
+ |-xvdb2
+ |-xvdb3 btrfs test
+ \-xvdb4 ext4 test
+
+ If you ask for 'auto', a label of 'test', and a file system of 'ext4'
+ then cloud-init will select the 2nd partition, even though there
+ is a partition match at the 4th partition.
+
+ If you ask for 'any' and a label of 'test', then cloud-init will
+ select the 1st partition.
+
+ If you ask for 'auto' and don't define a label, then cloud-init will
+ select the 1st partition.
+
+ In general, if you have a specific partition configuration in mind,
+ you should define either the device or the partition number. 'auto'
+ and 'any' are specifically intended for formatting ephemeral storage or
+ for simple schemes.
+
+ "none": Put the file system directly on the device.
+
+ <NUM>: where NUM is the actual partition number.
+
+ <OVERWRITE>: Defines whether or not to overwrite any existing
+ filesystem.
+
+ "true": Indiscriminately destroy any pre-existing file system. Use at
+ your own peril.
+
+ "false": If an existing file system exists, skip the creation.
+
+ <REPLACE_FS>: This is a special directive, used for Windows Azure,
+ that instructs cloud-init to replace a file system of <FS_TYPE>. NOTE:
+ unless you define a label, this requires the use of the 'any' partition
+ directive.
+
+Behavior Caveat: The default behavior is to _check_ if the file system exists.
+ If a file system matches the specification, then the operation is a no-op.
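+
+Putting it together
+-------------------
+
+A minimal, hypothetical configuration that creates a single partition
+spanning /dev/xvdh and formats it as ext4 (the device name and label
+below are illustrative only):
+
+ disk_setup:
+   /dev/xvdh:
+     type: 'mbr'
+     layout: True
+     overwrite: False
+
+ fs_setup:
+   - label: data
+     filesystem: 'ext4'
+     device: '/dev/xvdh'
+     partition: 'auto'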
diff --git a/doc/examples/cloud-config-growpart.txt b/doc/examples/cloud-config-growpart.txt
index 705f02c2..a459573d 100644
--- a/doc/examples/cloud-config-growpart.txt
+++ b/doc/examples/cloud-config-growpart.txt
@@ -19,6 +19,15 @@
# examples:
# devices: [/, /dev/vdb1]
#
+# ignore_growroot_disabled:
+# a boolean, default is false.
+# if the file /etc/growroot-disabled exists, then cloud-init will not grow
+# the root partition. This is to allow a single file to disable both
+# cloud-initramfs-growroot and cloud-init's growroot support.
+#
+# true indicates that /etc/growroot-disabled should be ignored
+#
growpart:
mode: auto
devices: ['/']
+ ignore_growroot_disabled: false
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
index de5f321b..01548380 100644
--- a/doc/examples/cloud-config-user-groups.txt
+++ b/doc/examples/cloud-config-user-groups.txt
@@ -46,8 +46,8 @@ users:
# inactive: Create the user as inactive
# passwd: The hash -- not the password itself -- of the password you want
# to use for this user. You can generate a safe hash via:
-# mkpasswd -m SHA-512 -s 4096
-# (the above command would create a password SHA512 password hash
+# mkpasswd --method=SHA-512 --rounds=4096
+# (the above command would create from stdin an SHA-512 password hash
# with 4096 salt rounds)
#
# Please note: while the use of a hashed password is better than
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 59c58805..5543ed34 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -141,6 +141,12 @@ Config Drive
.. include:: ../../sources/configdrive/README.rst
---------------------------
+OpenNebula
+---------------------------
+
+.. include:: ../../sources/opennebula/README.rst
+
+---------------------------
Alt cloud
---------------------------
diff --git a/doc/sources/opennebula/README.rst b/doc/sources/opennebula/README.rst
new file mode 100644
index 00000000..4d7de27a
--- /dev/null
+++ b/doc/sources/opennebula/README.rst
@@ -0,0 +1,142 @@
+The `OpenNebula`_ (ON) datasource supports the contextualization disk.
+
+ See `contextualization overview`_, `contextualizing VMs`_ and
+ `network configuration`_ in the public documentation for
+ more information.
+
+OpenNebula's virtual machines are contextualized (parametrized) by
+CD-ROM image, which contains a shell script *context.sh* with
+custom variables defined at virtual machine start. There are no
+fixed contextualization variables; the datasource accepts many of
+those used and recommended across the documentation.
+
+Datasource configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The datasource accepts the following configuration options.
+
+::
+
+ dsmode:
+ values: local, net, disabled
+ default: net
+
+Tells whether this datasource is processed in the 'local' (pre-networking)
+or 'net' (post-networking) stage, or is completely 'disabled'.
+
+::
+
+ parseuser:
+ default: nobody
+
+Unprivileged system user used for contextualization script
+processing.
+
+Contextualization disk
+~~~~~~~~~~~~~~~~~~~~~~
+
+The following criteria are required:
+
+1. Must be formatted with an `iso9660`_ filesystem
+ or have a *filesystem* label of **CONTEXT** or **CDROM**
+2. Must contain the file *context.sh* with contextualization variables.
+ The file is generated by OpenNebula; it has a KEY='VALUE' format and
+ can easily be read by bash
+
+Contextualization variables
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are no fixed contextualization variables in OpenNebula, no standard.
+The following variables were found in various places and revisions of
+the OpenNebula documentation. Where multiple similar variables are
+specified, only the first one found is taken.
+
+::
+
+ DSMODE
+
+Datasource mode configuration override. Values: local, net, disabled.
+
+::
+
+ DNS
+ ETH<x>_IP
+ ETH<x>_NETWORK
+ ETH<x>_MASK
+ ETH<x>_GATEWAY
+ ETH<x>_DOMAIN
+ ETH<x>_DNS
+
+Static `network configuration`_.
+
+::
+
+ HOSTNAME
+
+Instance hostname.
+
+::
+
+ PUBLIC_IP
+ IP_PUBLIC
+ ETH0_IP
+
+If no hostname has been specified, cloud-init will try to create a hostname
+from the instance's IP address in 'local' dsmode. In 'net' dsmode, cloud-init
+tries to resolve one of its IP addresses to get the hostname.
+
+::
+
+ SSH_KEY
+ SSH_PUBLIC_KEY
+
+One or multiple SSH keys (separated by newlines) can be specified.
+
+::
+
+ USER_DATA
+ USERDATA
+
+cloud-init user data.
+
+Example configuration
+~~~~~~~~~~~~~~~~~~~~~
+
+This example cloud-init configuration (*cloud.cfg*) enables
+OpenNebula datasource only in 'net' mode.
+
+::
+
+ disable_ec2_metadata: True
+ datasource_list: ['OpenNebula']
+ datasource:
+ OpenNebula:
+ dsmode: net
+ parseuser: nobody
+
+Example VM's context section
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ CONTEXT=[
+ PUBLIC_IP="$NIC[IP]",
+ SSH_KEY="$USER[SSH_KEY]
+ $USER[SSH_KEY1]
+ $USER[SSH_KEY2] ",
+ USER_DATA="#cloud-config
+ # see https://help.ubuntu.com/community/CloudInit
+
+ packages: []
+
+ mounts:
+ - [vdc,none,swap,sw,0,0]
+ runcmd:
+ - echo 'Instance has been configured by cloud-init.' | wall
+ " ]
+
+.. _OpenNebula: http://opennebula.org/
+.. _contextualization overview: http://opennebula.org/documentation:documentation:context_overview
+.. _contextualizing VMs: http://opennebula.org/documentation:documentation:cong
+.. _network configuration: http://opennebula.org/documentation:documentation:cong#network_configuration
+.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
diff --git a/doc/sources/smartos/README.rst b/doc/sources/smartos/README.rst
index fd4e496d..8b63e520 100644
--- a/doc/sources/smartos/README.rst
+++ b/doc/sources/smartos/README.rst
@@ -5,11 +5,13 @@ SmartOS Datasource
This datasource finds metadata and user-data from the SmartOS virtualization
platform (i.e. Joyent).
+Please see http://smartos.org/ for information about SmartOS.
+
SmartOS Platform
----------------
-The SmartOS virtualization platform meta-data to the instance via the second
-serial console. On Linux, this is /dev/ttyS1. The data is a provided via a
-simple protocol, where something queries for the userdata, where the console
+The SmartOS virtualization platform provides meta-data to the instance via
+the second serial console. On Linux, this is /dev/ttyS1. The data is provided
+via a simple protocol: something queries for the data, and the console
responds with the status and if "SUCCESS" returns until a single ".\n".
New versions of the SmartOS tooling will include support for base64 encoded data.
@@ -18,7 +20,7 @@ Userdata
--------
In SmartOS parlance, user-data is actually meta-data. This userdata can be
-provided a key-value pairs.
+provided as key-value pairs.
Cloud-init supports reading the traditional meta-data fields supported by the
SmartOS tools. These are:
@@ -36,13 +38,13 @@ user-script
SmartOS traditionally supports sending over a user-script for execution at the
rc.local level. Cloud-init supports running user-scripts as if they were
cloud-init user-data. In this sense, anything with a shell interpreter
-directive will run
+directive will run.
user-data and user-script
-------------------------
In the event that a user defines the meta-data key of "user-data" it will
-always supercede any user-script data. This is for consistency.
+always supersede any user-script data. This is for consistency.
base64
------
@@ -70,3 +72,22 @@ or not to base64 decode something:
* no_base64_decode: This is a configuration setting
(i.e. /etc/cloud/cloud.cfg.d) that sets which values should not be
base64 decoded.
+
+disk_aliases and ephemeral disk
+-------------------------------
+By default, SmartOS only supports a single ephemeral disk. That disk is
+completely empty (un-partitioned with no filesystem).
+
+The SmartOS datasource has built-in cloud-config which instructs the
+'disk_setup' module to partition and format the ephemeral disk.
+
+You can then control the disk_setup in two ways:
+ 1. through the datasource config, you can change the 'alias' of
+ ephemeral0 to reference another device. The default is:
+ 'disk_aliases': {'ephemeral0': '/dev/vdb'},
+ which means that anywhere disk_setup sees a device named 'ephemeral0',
+ /dev/vdb will be substituted.
+ 2. you can provide disk_setup or fs_setup data in user-data to overwrite
+ the datasource's built-in values.
+
+See doc/examples/cloud-config-disk-setup.txt for information on disk_setup.
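+
+For example, a hypothetical datasource configuration that points
+ephemeral0 at /dev/vdc instead of the default:
+
+ datasource:
+   SmartOS:
+     disk_aliases: {'ephemeral0': '/dev/vdc'}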
diff --git a/packages/bddeb b/packages/bddeb
index 30559870..8de4d466 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -145,7 +145,7 @@ def main():
print("Creating a debian/ folder in %r" % (xdir))
if not args.no_cloud_utils:
- append_requires=['cloud-utils']
+ append_requires=['cloud-utils | cloud-guest-utils']
else:
append_requires=[]
write_debian_folder(xdir, version, revno, append_requires)
diff --git a/systemd/cloud-config.service b/systemd/cloud-config.service
index fc72fc48..41a86147 100644
--- a/systemd/cloud-config.service
+++ b/systemd/cloud-config.service
@@ -11,7 +11,7 @@ RemainAfterExit=yes
TimeoutSec=0
# Output needs to appear in instance console output
-StandardOutput=tty
+StandardOutput=journal+console
[Install]
WantedBy=multi-user.target
diff --git a/systemd/cloud-final.service b/systemd/cloud-final.service
index f836eab6..ef0f52b9 100644
--- a/systemd/cloud-final.service
+++ b/systemd/cloud-final.service
@@ -11,7 +11,7 @@ RemainAfterExit=yes
TimeoutSec=0
# Output needs to appear in instance console output
-StandardOutput=tty
+StandardOutput=journal+console
[Install]
WantedBy=multi-user.target
diff --git a/systemd/cloud-init-local.service b/systemd/cloud-init-local.service
index 6a551710..a31985c6 100644
--- a/systemd/cloud-init-local.service
+++ b/systemd/cloud-init-local.service
@@ -10,7 +10,7 @@ RemainAfterExit=yes
TimeoutSec=0
# Output needs to appear in instance console output
-StandardOutput=tty
+StandardOutput=journal+console
[Install]
WantedBy=multi-user.target
diff --git a/systemd/cloud-init.service b/systemd/cloud-init.service
index d4eb9fa5..018a1fa8 100644
--- a/systemd/cloud-init.service
+++ b/systemd/cloud-init.service
@@ -11,7 +11,7 @@ RemainAfterExit=yes
TimeoutSec=0
# Output needs to appear in instance console output
-StandardOutput=tty
+StandardOutput=journal+console
[Install]
WantedBy=multi-user.target
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 1ca6a79d..aad84206 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -2,8 +2,8 @@ from cloudinit import helpers
from cloudinit.sources import DataSourceAzure
from tests.unittests.helpers import populate_dir
-import crypt
import base64
+import crypt
from mocker import MockerTestCase
import os
import yaml
@@ -120,8 +120,7 @@ class TestAzureDataSource(MockerTestCase):
mod = DataSourceAzure
- if data.get('dsdevs'):
- self.apply_patches([(mod, 'list_possible_azure_ds_devs', dsdevs)])
+ self.apply_patches([(mod, 'list_possible_azure_ds_devs', dsdevs)])
self.apply_patches([(mod, 'invoke_agent', _invoke_agent),
(mod, 'write_files', _write_files),
@@ -154,9 +153,12 @@ class TestAzureDataSource(MockerTestCase):
def test_user_cfg_set_agent_command_plain(self):
# set dscfg in via plaintext
- cfg = {'agent_command': "my_command"}
+ # we must have friendly-to-xml formatted plaintext in yaml_cfg
+ # not all plaintext is expected to work.
+ yaml_cfg = "{agent_command: my_command}\n"
+ cfg = yaml.safe_load(yaml_cfg)
odata = {'HostName': "myhost", 'UserName': "myuser",
- 'dscfg': {'text': yaml.dump(cfg), 'encoding': 'plain'}}
+ 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
dsrc = self._get_ds(data)
@@ -290,11 +292,57 @@ class TestAzureDataSource(MockerTestCase):
self.assertEqual(data.get('apply_hostname_bounce', "N/A"), "N/A")
+ def test_default_ephemeral(self):
+ # make sure the ephemeral device works
+ odata = {}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': {}}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ cfg = dsrc.get_config_obj()
+
+ self.assertEquals(dsrc.device_name_to_device("ephemeral0"),
+ "/dev/sdb")
+ assert 'disk_setup' in cfg
+ assert 'fs_setup' in cfg
+ self.assertIsInstance(cfg['disk_setup'], dict)
+ self.assertIsInstance(cfg['fs_setup'], list)
+
+ def test_provide_disk_aliases(self):
+ # Make sure that user can affect disk aliases
+ dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}}
+ odata = {'HostName': "myhost", 'UserName': "myuser",
+ 'dscfg': {'text': base64.b64encode(yaml.dump(dscfg)),
+ 'encoding': 'base64'}}
+ usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'},
+ 'ephemeral0': False}}
+ userdata = '#cloud-config' + yaml.dump(usercfg) + "\n"
+
+ ovfcontent = construct_valid_ovf_env(data=odata, userdata=userdata)
+ data = {'ovfcontent': ovfcontent, 'sys_cfg': {}}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ cfg = dsrc.get_config_obj()
+ self.assertTrue(cfg)
+
+ def test_userdata_arrives(self):
+ userdata = "This is my user-data"
+ xml = construct_valid_ovf_env(data={}, userdata=userdata)
+ data = {'ovfcontent': xml}
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+
+ self.assertEqual(userdata, dsrc.userdata_raw)
+
class TestReadAzureOvf(MockerTestCase):
def test_invalid_xml_raises_non_azure_ds(self):
invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
- self.assertRaises(DataSourceAzure.NonAzureDataSource,
+ self.assertRaises(DataSourceAzure.BrokenAzureDataSource,
DataSourceAzure.read_azure_ovf, invalid_xml)
def test_load_with_pubkeys(self):
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
new file mode 100644
index 00000000..e1812a88
--- /dev/null
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -0,0 +1,267 @@
+from cloudinit import helpers
+from cloudinit.sources import DataSourceOpenNebula as ds
+from cloudinit import util
+from mocker import MockerTestCase
+from tests.unittests.helpers import populate_dir
+
+import os
+import pwd
+
+TEST_VARS = {
+ 'VAR1': 'single',
+ 'VAR2': 'double word',
+ 'VAR3': 'multi\nline\n',
+ 'VAR4': "'single'",
+ 'VAR5': "'double word'",
+ 'VAR6': "'multi\nline\n'",
+ 'VAR7': 'single\\t',
+ 'VAR8': 'double\\tword',
+ 'VAR9': 'multi\\t\nline\n',
+ 'VAR10': '\\', # expect \
+ 'VAR11': '\'', # expect '
+ 'VAR12': '$', # expect $
+}
+
+INVALID_CONTEXT = ';'
+USER_DATA = '#cloud-config\napt_upgrade: true'
+SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i'
+HOSTNAME = 'foo.example.com'
+PUBLIC_IP = '10.0.0.3'
+
+CMD_IP_OUT = '''\
+1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
+ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
+ link/ether 02:00:0a:12:01:01 brd ff:ff:ff:ff:ff:ff
+'''
+
+
+class TestOpenNebulaDataSource(MockerTestCase):
+ parsed_user = None
+
+ def setUp(self):
+ super(TestOpenNebulaDataSource, self).setUp()
+ self.tmp = self.makeDir()
+ self.paths = helpers.Paths({'cloud_dir': self.tmp})
+
+ # defaults for a few tests
+ self.ds = ds.DataSourceOpenNebula
+ self.seed_dir = os.path.join(self.paths.seed_dir, "opennebula")
+ self.sys_cfg = {'datasource': {'OpenNebula': {'dsmode': 'local'}}}
+
+ # we don't want 'sudo' called in tests, so we patch switch_user_cmd
+ def my_switch_user_cmd(user):
+ self.parsed_user = user
+ return []
+
+ self.switch_user_cmd_real = ds.switch_user_cmd
+ ds.switch_user_cmd = my_switch_user_cmd
+
+ def tearDown(self):
+ ds.switch_user_cmd = self.switch_user_cmd_real
+ super(TestOpenNebulaDataSource, self).tearDown()
+
+ def test_get_data_non_contextdisk(self):
+ orig_find_devs_with = util.find_devs_with
+ try:
+ # don't try to look up CDs
+ util.find_devs_with = lambda n: []
+ dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertFalse(ret)
+ finally:
+ util.find_devs_with = orig_find_devs_with
+
+ def test_get_data_broken_contextdisk(self):
+ orig_find_devs_with = util.find_devs_with
+ try:
+ # don't try to look up CDs
+ util.find_devs_with = lambda n: []
+ populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT})
+ dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
+ self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
+ finally:
+ util.find_devs_with = orig_find_devs_with
+
+ def test_get_data_invalid_identity(self):
+ orig_find_devs_with = util.find_devs_with
+ try:
+ # generate non-existing system user name
+ sys_cfg = self.sys_cfg
+ invalid_user = 'invalid'
+ while not sys_cfg['datasource']['OpenNebula'].get('parseuser'):
+ try:
+ pwd.getpwnam(invalid_user)
+ invalid_user += 'X'
+ except KeyError:
+ sys_cfg['datasource']['OpenNebula']['parseuser'] = \
+ invalid_user
+
+ # don't try to look up CDs
+ util.find_devs_with = lambda n: []
+ populate_context_dir(self.seed_dir, {'KEY1': 'val1'})
+ dsrc = self.ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+ self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
+ finally:
+ util.find_devs_with = orig_find_devs_with
+
+ def test_get_data(self):
+ orig_find_devs_with = util.find_devs_with
+ try:
+ # don't try to look up CDs
+ util.find_devs_with = lambda n: []
+ populate_context_dir(self.seed_dir, {'KEY1': 'val1'})
+ dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ finally:
+ util.find_devs_with = orig_find_devs_with
+
+ def test_seed_dir_non_contextdisk(self):
+ self.assertRaises(ds.NonContextDiskDir, ds.read_context_disk_dir,
+ self.seed_dir)
+
+ def test_seed_dir_empty1_context(self):
+ populate_dir(self.seed_dir, {'context.sh': ''})
+ results = ds.read_context_disk_dir(self.seed_dir)
+
+ self.assertEqual(results['userdata'], None)
+ self.assertEqual(results['metadata'], {})
+
+ def test_seed_dir_empty2_context(self):
+ populate_context_dir(self.seed_dir, {})
+ results = ds.read_context_disk_dir(self.seed_dir)
+
+ self.assertEqual(results['userdata'], None)
+ self.assertEqual(results['metadata'], {})
+
+ def test_seed_dir_broken_context(self):
+ populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT})
+
+ self.assertRaises(ds.BrokenContextDiskDir,
+ ds.read_context_disk_dir,
+ self.seed_dir)
+
+ def test_context_parser(self):
+ populate_context_dir(self.seed_dir, TEST_VARS)
+ results = ds.read_context_disk_dir(self.seed_dir)
+
+ self.assertTrue('metadata' in results)
+ self.assertEqual(TEST_VARS, results['metadata'])
+
+ def test_ssh_key(self):
+ public_keys = ['first key', 'second key']
+ for c in range(4):
+ for k in ('SSH_KEY', 'SSH_PUBLIC_KEY'):
+ my_d = os.path.join(self.tmp, "%s-%i" % (k, c))
+ populate_context_dir(my_d, {k: '\n'.join(public_keys)})
+ results = ds.read_context_disk_dir(my_d)
+
+ self.assertTrue('metadata' in results)
+ self.assertTrue('public-keys' in results['metadata'])
+ self.assertEqual(public_keys,
+ results['metadata']['public-keys'])
+
+ public_keys.append(SSH_KEY % (c + 1,))
+
+ def test_user_data(self):
+ for k in ('USER_DATA', 'USERDATA'):
+ my_d = os.path.join(self.tmp, k)
+ populate_context_dir(my_d, {k: USER_DATA})
+ results = ds.read_context_disk_dir(my_d)
+
+ self.assertTrue('userdata' in results)
+ self.assertEqual(USER_DATA, results['userdata'])
+
+ def test_hostname(self):
+ for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
+ my_d = os.path.join(self.tmp, k)
+ populate_context_dir(my_d, {k: PUBLIC_IP})
+ results = ds.read_context_disk_dir(my_d)
+
+ self.assertTrue('metadata' in results)
+ self.assertTrue('local-hostname' in results['metadata'])
+ self.assertEqual(PUBLIC_IP, results['metadata']['local-hostname'])
+
+ def test_network_interfaces(self):
+ populate_context_dir(self.seed_dir, {'ETH0_IP': '1.2.3.4'})
+ results = ds.read_context_disk_dir(self.seed_dir)
+
+ self.assertTrue('network-interfaces' in results)
+
+ def test_find_candidates(self):
+ def my_devs_with(criteria):
+ return {
+ "LABEL=CONTEXT": ["/dev/sdb"],
+ "LABEL=CDROM": ["/dev/sr0"],
+ "TYPE=iso9660": ["/dev/vdb"],
+ }.get(criteria, [])
+
+ orig_find_devs_with = util.find_devs_with
+ try:
+ util.find_devs_with = my_devs_with
+ self.assertEqual(["/dev/sdb", "/dev/sr0", "/dev/vdb"],
+ ds.find_candidate_devs())
+ finally:
+ util.find_devs_with = orig_find_devs_with
+
+
+class TestOpenNebulaNetwork(MockerTestCase):
+
+ def setUp(self):
+ super(TestOpenNebulaNetwork, self).setUp()
+
+ def test_lo(self):
+ net = ds.OpenNebulaNetwork('', {})
+ self.assertEqual(net.gen_conf(), u'''\
+auto lo
+iface lo inet loopback
+''')
+
+ def test_eth0(self):
+ net = ds.OpenNebulaNetwork(CMD_IP_OUT, {})
+ self.assertEqual(net.gen_conf(), u'''\
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 10.18.1.1
+ network 10.18.1.0
+ netmask 255.255.255.0
+''')
+
+ def test_eth0_override(self):
+ context = {
+ 'DNS': '1.2.3.8',
+ 'ETH0_IP': '1.2.3.4',
+ 'ETH0_NETWORK': '1.2.3.0',
+ 'ETH0_MASK': '255.255.0.0',
+ 'ETH0_GATEWAY': '1.2.3.5',
+ 'ETH0_DOMAIN': 'example.com',
+ 'ETH0_DNS': '1.2.3.6 1.2.3.7'
+ }
+
+ net = ds.OpenNebulaNetwork(CMD_IP_OUT, context)
+ self.assertEqual(net.gen_conf(), u'''\
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 1.2.3.4
+ network 1.2.3.0
+ netmask 255.255.0.0
+ gateway 1.2.3.5
+ dns-search example.com
+ dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7
+''')
+
+
+def populate_context_dir(path, variables):
+ data = "# Context variables generated by OpenNebula\n"
+ for (k, v) in variables.iteritems():
+ data += ("%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''")))
+ populate_dir(path, {'context.sh': data})
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index f53715b0..956767d8 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -79,7 +79,6 @@ class MockSerial(object):
if self.last in self.mockdata:
if not self.mocked_out:
self.mocked_out = [x for x in self._format_out()]
- print self.mocked_out
if len(self.mocked_out) > self.count:
self.count += 1
@@ -261,6 +260,40 @@ class TestSmartOSDataSource(MockerTestCase):
self.assertEquals(MOCK_RETURNS['enable_motd_sys_info'],
dsrc.metadata['motd_sys_info'])
+ def test_default_ephemeral(self):
+ # Test to make sure that the builtin config has the ephemeral
+ # configuration.
+ dsrc = self._get_ds()
+ cfg = dsrc.get_config_obj()
+
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+
+ assert 'disk_setup' in cfg
+ assert 'fs_setup' in cfg
+ self.assertIsInstance(cfg['disk_setup'], dict)
+ self.assertIsInstance(cfg['fs_setup'], list)
+
+ def test_override_disk_aliases(self):
+ # Test to make sure that the built-in DS is overridden
+ builtin = DataSourceSmartOS.BUILTIN_DS_CONFIG
+
+ mydscfg = {'disk_aliases': {'FOO': '/dev/bar'}}
+
+ # expect that these values are in builtin, or this is pointless
+ for k in mydscfg:
+ self.assertIn(k, builtin)
+
+ dsrc = self._get_ds(ds_cfg=mydscfg)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+
+ self.assertEqual(mydscfg['disk_aliases']['FOO'],
+ dsrc.ds_cfg['disk_aliases']['FOO'])
+
+ self.assertEqual(dsrc.device_name_to_device('FOO'),
+ mydscfg['disk_aliases']['FOO'])
+
def apply_patches(patches):
ret = []
diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py
new file mode 100644
index 00000000..2b21ac02
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_seed_random.py
@@ -0,0 +1,150 @@
+# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# Based on test_handler_set_hostname.py
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit.config import cc_seed_random
+
+import base64
+import gzip
+import tempfile
+
+from StringIO import StringIO
+
+from cloudinit import cloud
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import util
+
+from cloudinit.sources import DataSourceNone
+
+from tests.unittests import helpers as t_help
+
+import logging
+
+LOG = logging.getLogger(__name__)
+
+
+class TestRandomSeed(t_help.TestCase):
+ def setUp(self):
+ super(TestRandomSeed, self).setUp()
+ self._seed_file = tempfile.mktemp()
+
+ def tearDown(self):
+ util.del_file(self._seed_file)
+
+ def _compress(self, text):
+ contents = StringIO()
+ gz_fh = gzip.GzipFile(mode='wb', fileobj=contents)
+ gz_fh.write(text)
+ gz_fh.close()
+ return contents.getvalue()
+
+ def _get_cloud(self, distro, metadata=None):
+ paths = helpers.Paths({})
+ cls = distros.fetch(distro)
+ ubuntu_distro = cls(distro, {}, paths)
+ ds = DataSourceNone.DataSourceNone({}, ubuntu_distro, paths)
+ if metadata:
+ ds.metadata = metadata
+ return cloud.Cloud(ds, paths, {}, ubuntu_distro, None)
+
+ def test_append_random(self):
+ cfg = {
+ 'random_seed': {
+ 'file': self._seed_file,
+ 'data': 'tiny-tim-was-here',
+ }
+ }
+ cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEquals("tiny-tim-was-here", contents)
+
+ def test_append_random_unknown_encoding(self):
+ data = self._compress("tiny-toe")
+ cfg = {
+ 'random_seed': {
+ 'file': self._seed_file,
+ 'data': data,
+ 'encoding': 'special_encoding',
+ }
+ }
+ self.assertRaises(IOError, cc_seed_random.handle, 'test', cfg,
+ self._get_cloud('ubuntu'), LOG, [])
+
+ def test_append_random_gzip(self):
+ data = self._compress("tiny-toe")
+ cfg = {
+ 'random_seed': {
+ 'file': self._seed_file,
+ 'data': data,
+ 'encoding': 'gzip',
+ }
+ }
+ cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEquals("tiny-toe", contents)
+
+ def test_append_random_gz(self):
+ data = self._compress("big-toe")
+ cfg = {
+ 'random_seed': {
+ 'file': self._seed_file,
+ 'data': data,
+ 'encoding': 'gz',
+ }
+ }
+ cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEquals("big-toe", contents)
+
+ def test_append_random_base64(self):
+ data = base64.b64encode('bubbles')
+ cfg = {
+ 'random_seed': {
+ 'file': self._seed_file,
+ 'data': data,
+ 'encoding': 'base64',
+ }
+ }
+ cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEquals("bubbles", contents)
+
+ def test_append_random_b64(self):
+ data = base64.b64encode('kit-kat')
+ cfg = {
+ 'random_seed': {
+ 'file': self._seed_file,
+ 'data': data,
+ 'encoding': 'b64',
+ }
+ }
+ cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEquals("kit-kat", contents)
+
+ def test_append_random_metadata(self):
+ cfg = {
+ 'random_seed': {
+ 'file': self._seed_file,
+ 'data': 'tiny-tim-was-here',
+ }
+ }
+ c = self._get_cloud('ubuntu', {'random_seed': '-so-was-josh'})
+ cc_seed_random.handle('test', cfg, c, LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEquals('tiny-tim-was-here-so-was-josh', contents)
diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py
index b227616c..5ffe8f0a 100644
--- a/tests/unittests/test_userdata.py
+++ b/tests/unittests/test_userdata.py
@@ -6,9 +6,9 @@ import gzip
import logging
import os
+from email.mime.application import MIMEApplication
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
-from email.mime.application import MIMEApplication
from cloudinit import handlers
from cloudinit import helpers as c_helpers
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 87415cb5..38ab0c96 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -5,8 +5,8 @@ import stat
import yaml
from mocker import MockerTestCase
-from unittest import TestCase
from tests.unittests import helpers
+from unittest import TestCase
from cloudinit import importer
from cloudinit import util
diff --git a/tools/read-dependencies b/tools/read-dependencies
index 92254189..3335f6a4 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -12,20 +12,21 @@ find_root() {
[ $? -eq 0 -a -f "${topd}/setup.py" ] || return
ROOT_DIR="$topd"
}
+fail() { echo "$0:" "$@" 1>&2; exit 1; }
if ! find_root; then
- echo "Unable to locate 'setup.py' file that should" \
- "exist in the cloud-init root directory." 1>&2
- exit 1;
+ fail "Unable to locate 'setup.py' file that should " \
+ "exist in the cloud-init root directory."
fi
REQUIRES="$ROOT_DIR/Requires"
if [ ! -e "$REQUIRES" ]; then
- echo "Unable to find 'Requires' file located at $REQUIRES"
- exit 1
+ fail "Unable to find 'Requires' file located at '$REQUIRES'"
fi
-# Filter out comments and empty liens
-DEPS=$(grep -v "^\s*#" "$REQUIRES" | grep -v '^\s*$')
+# Filter out comments and empty lines
+DEPS=$(sed -n -e 's,#.*,,' -e '/./p' "$REQUIRES") &&
+ [ -n "$DEPS" ] ||
+ fail "failed to read deps from '${REQUIRES}'"
echo "$DEPS" | sort -d -f
diff --git a/tools/read-version b/tools/read-version
index c317a89e..599f52cd 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -12,20 +12,21 @@ find_root() {
[ $? -eq 0 -a -f "${topd}/setup.py" ] || return
ROOT_DIR="$topd"
}
+fail() { echo "$0:" "$@" 1>&2; exit 1; }
if ! find_root; then
- echo "Unable to locate 'setup.py' file that should" \
- "exist in the cloud-init root directory." 1>&2
- exit 1;
+ fail "Unable to locate 'setup.py' file that should " \
+ "exist in the cloud-init root directory."
fi
CHNG_LOG="$ROOT_DIR/ChangeLog"
-if [ ! -e "$CHNG_LOG" ]
-then
- echo "Unable to find 'ChangeLog' file located at $CHNG_LOG"
- exit 1
+if [ ! -e "$CHNG_LOG" ]; then
+ fail "Unable to find 'ChangeLog' file located at '$CHNG_LOG'"
fi
-VERSION=$(grep "[0-9][0-9]*.[0-9][0-9]*.[0-9][0-9]*:" "$CHNG_LOG" | cut -f1 -d ":" | head -n 1)
+VERSION=$(sed -n '/^[0-9]\+[.][0-9]\+[.][0-9]\+:/ {s/://; p; :a;n; ba; }' \
+ "$CHNG_LOG") &&
+ [ -n "$VERSION" ] ||
+ fail "failed to get version from '$CHNG_LOG'"
echo "$VERSION"
diff --git a/upstart/cloud-init-nonet.conf b/upstart/cloud-init-nonet.conf
index a94b1474..e8ebee96 100644
--- a/upstart/cloud-init-nonet.conf
+++ b/upstart/cloud-init-nonet.conf
@@ -14,7 +14,7 @@ script
SLEEP_CHILD=""
static_network_up() {
- local emitted="/run/network/static-network-up-emitted"
+ local emitted="/run/network/static-network-up-emitted"
# /run/network/static-network-up-emitted is written by
# upstart (via /etc/network/if-up.d/upstart). its presense would
# indicate that static-network-up has already fired.