author     Adam Coldrick <adam.coldrick@codethink.co.uk>  2015-06-02 08:22:26 +0000
committer  Adam Coldrick <adam.coldrick@codethink.co.uk>  2015-06-02 13:55:31 +0000
commit     6f49299467a09236d7c0c564fe55bc8eafa7defd (patch)
tree       c9c628dd466ebfcc895d2ccc9f16440b4955f1a6 /extensions
parent     0cd8880023fc65ec581da95dd49a58b5996a1279 (diff)
download   definitions-6f49299467a09236d7c0c564fe55bc8eafa7defd.tar.gz
Move extensions into a subdirectory
Change-Id: I12e7c03b30da78da1eb220d2826ce0003d6efe2e
Diffstat (limited to 'extensions')
-rwxr-xr-x  extensions/add-config-files.configure  26
-rw-r--r--  extensions/busybox-init.configure  145
-rw-r--r--  extensions/ceph.configure  266
-rwxr-xr-x  extensions/cloud-init.configure  63
-rwxr-xr-x  extensions/distbuild-trove-nfsboot.check  150
-rwxr-xr-x  extensions/distbuild-trove-nfsboot.write  283
-rw-r--r--  extensions/distbuild-trove-nfsboot.write.help  49
-rw-r--r--  extensions/distbuild.configure  132
-rwxr-xr-x  extensions/fstab.configure  28
-rw-r--r--  extensions/hosts  1
-rwxr-xr-x  extensions/hosts.configure  48
-rw-r--r--  extensions/image-package-example/README  9
-rw-r--r--  extensions/image-package-example/common.sh.in  72
-rw-r--r--  extensions/image-package-example/disk-install.sh.in  51
-rw-r--r--  extensions/image-package-example/make-disk-image.sh.in  36
-rwxr-xr-x  extensions/image-package.write  168
-rwxr-xr-x  extensions/initramfs.write  26
-rw-r--r--  extensions/initramfs.write.help  55
-rwxr-xr-x  extensions/install-essential-files.configure  42
-rw-r--r--  extensions/install-essential-files.configure.help  20
-rwxr-xr-x  extensions/install-files.configure  134
-rw-r--r--  extensions/install-files.configure.help  74
-rwxr-xr-x  extensions/installer.configure  48
-rw-r--r--  extensions/jffs2.write  64
-rw-r--r--  extensions/jffs2.write.help  28
-rwxr-xr-x  extensions/kvm.check  169
-rwxr-xr-x  extensions/kvm.write  120
-rw-r--r--  extensions/kvm.write.help  90
-rw-r--r--  extensions/mason.configure  153
-rw-r--r--  extensions/mason/ansible/hosts  1
-rw-r--r--  extensions/mason/ansible/mason-setup.yml  83
-rw-r--r--  extensions/mason/httpd.service  10
-rwxr-xr-x  extensions/mason/mason-generator.sh  101
-rwxr-xr-x  extensions/mason/mason-report.sh  252
-rw-r--r--  extensions/mason/mason-setup.service  16
-rw-r--r--  extensions/mason/mason.service  12
-rwxr-xr-x  extensions/mason/mason.sh  93
-rw-r--r--  extensions/mason/mason.timer  10
-rw-r--r--  extensions/mason/os-init-script  6
-rw-r--r--  extensions/mason/share/mason.conf  14
-rw-r--r--  extensions/mason/share/os.conf  30
-rw-r--r--  extensions/moonshot-kernel.configure  33
-rwxr-xr-x  extensions/nfsboot-server.configure  58
-rwxr-xr-x  extensions/nfsboot.check  95
-rwxr-xr-x  extensions/nfsboot.configure  30
-rwxr-xr-x  extensions/nfsboot.write  202
-rw-r--r--  extensions/nfsboot.write.help  33
-rw-r--r--  extensions/openstack-ceilometer.configure  120
-rw-r--r--  extensions/openstack-cinder.configure  125
-rw-r--r--  extensions/openstack-glance.configure  101
-rw-r--r--  extensions/openstack-ironic.configure  155
-rw-r--r--  extensions/openstack-keystone.configure  123
-rw-r--r--  extensions/openstack-network.configure  50
-rw-r--r--  extensions/openstack-neutron.configure  138
-rw-r--r--  extensions/openstack-nova.configure  168
-rw-r--r--  extensions/openstack-swift-controller.configure  49
-rwxr-xr-x  extensions/openstack.check  90
-rwxr-xr-x  extensions/openstack.write  93
-rw-r--r--  extensions/openstack.write.help  51
-rwxr-xr-x  extensions/pxeboot.check  86
-rw-r--r--  extensions/pxeboot.write  755
-rw-r--r--  extensions/pxeboot.write.help  166
-rwxr-xr-x  extensions/rawdisk.check  53
-rwxr-xr-x  extensions/rawdisk.write  108
-rw-r--r--  extensions/rawdisk.write.help  82
-rwxr-xr-x  extensions/sdk.write  284
-rwxr-xr-x  extensions/set-hostname.configure  26
-rwxr-xr-x  extensions/simple-network.configure  292
-rwxr-xr-x  extensions/ssh-rsync.check  64
-rwxr-xr-x  extensions/ssh-rsync.write  172
-rw-r--r--  extensions/ssh-rsync.write.help  50
-rwxr-xr-x  extensions/sshkeys.configure  25
-rwxr-xr-x  extensions/strip-gplv3.configure  101
-rw-r--r--  extensions/swift-build-rings.yml  34
-rwxr-xr-x  extensions/swift-storage-devices-validate.py  60
-rw-r--r--  extensions/swift-storage.configure  107
-rwxr-xr-x  extensions/sysroot.check  23
-rwxr-xr-x  extensions/sysroot.write  22
-rwxr-xr-x  extensions/tar.check  23
-rwxr-xr-x  extensions/tar.write  20
-rw-r--r--  extensions/tar.write.help  19
-rwxr-xr-x  extensions/trove.configure  148
-rw-r--r--  extensions/trove.configure.help  126
-rw-r--r--  extensions/vagrant.configure  55
-rwxr-xr-x  extensions/vdaboot.configure  33
-rwxr-xr-x  extensions/virtualbox-ssh.check  36
-rwxr-xr-x  extensions/virtualbox-ssh.write  211
-rw-r--r--  extensions/virtualbox-ssh.write.help  135
88 files changed, 8208 insertions, 0 deletions
diff --git a/extensions/add-config-files.configure b/extensions/add-config-files.configure
new file mode 100755
index 00000000..2cf96fd1
--- /dev/null
+++ b/extensions/add-config-files.configure
@@ -0,0 +1,26 @@
+#!/bin/sh
+# Copyright (C) 2013,2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+# Copy all files located in $SRC_CONFIG_DIR to the image /etc.
+
+
+set -e
+
+if [ "x${SRC_CONFIG_DIR}" != x ]
+then
+ cp -r "$SRC_CONFIG_DIR"/* "$1/etc/"
+fi
+
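A minimal usage sketch, with hypothetical paths: morph runs this extension during deployment with the unpacked system root as its first argument, so a manual run looks like:

    SRC_CONFIG_DIR=/src/site-config \
        sh extensions/add-config-files.configure /mnt/target-root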
diff --git a/extensions/busybox-init.configure b/extensions/busybox-init.configure
new file mode 100644
index 00000000..c7dba3b9
--- /dev/null
+++ b/extensions/busybox-init.configure
@@ -0,0 +1,145 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configuration extension to configure a system
+# to use busybox for its init, if INIT_SYSTEM=busybox is specified.
+#
+# As well as checking INIT_SYSTEM, the following variables are used.
+#
+# Getty configuration:
+# * CONSOLE_DEV: Which device to spawn a getty on (default: ttyS0)
+# * CONSOLE_BAUD: Baud rate of the console (default: 115200)
+# * CONSOLE_MODE: What kind of terminal this console emulates
+# (default: vt100)
+
+if [ "$INIT_SYSTEM" != busybox ]; then
+ echo Not configuring system to use busybox init.
+ exit 0
+fi
+
+set -e
+echo Configuring system to use busybox init
+
+RUN_SCRIPT=/etc/rcS
+INIT_SCRIPT=/sbin/init
+
+install_mdev_config(){
+ install -D -m644 /dev/stdin "$1" <<'EOF'
+# support module loading on hotplug
+$MODALIAS=.* root:root 660 @modprobe "$MODALIAS"
+
+# null may already exist; therefore ownership has to be changed with command
+null root:root 666 @chmod 666 $MDEV
+zero root:root 666
+full root:root 666
+random root:root 444
+urandom root:root 444
+hwrandom root:root 444
+grsec root:root 660
+
+kmem root:root 640
+mem root:root 640
+port root:root 640
+# console may already exist; therefore ownership has to be changed with command
+console root:root 600 @chmod 600 $MDEV
+ptmx root:root 666
+pty.* root:root 660
+
+# Typical devices
+
+tty root:root 666
+tty[0-9]* root:root 660
+vcsa*[0-9]* root:root 660
+ttyS[0-9]* root:root 660
+
+# block devices
+ram[0-9]* root:root 660
+loop[0-9]+ root:root 660
+sd[a-z].* root:root 660
+hd[a-z][0-9]* root:root 660
+md[0-9]* root:root 660
+sr[0-9]* root:root 660 @ln -sf $MDEV cdrom
+fd[0-9]* root:root 660
+
+# net devices
+SUBSYSTEM=net;.* root:root 600 @nameif
+tun[0-9]* root:root 600 =net/
+tap[0-9]* root:root 600 =net/
+EOF
+}
+
+install_start_script(){
+ install -D -m755 /dev/stdin "$1" <<'EOF'
+#!/bin/sh
+mount -t devtmpfs devtmpfs /dev
+mount -t proc proc /proc
+mount -t sysfs sysfs /sys
+mkdir -p /dev/pts
+mount -t devpts devpts /dev/pts
+
+echo /sbin/mdev >/proc/sys/kernel/hotplug
+mdev -s
+
+hostname -F /etc/hostname
+
+run-parts -a start /etc/init.d
+EOF
+}
+
+install_inittab(){
+ local inittab="$1"
+ local dev="$2"
+ local baud="$3"
+ local mode="$4"
+ install -D -m644 /dev/stdin "$1" <<EOF
+::sysinit:$RUN_SCRIPT
+
+::askfirst:-/bin/cttyhack /bin/sh
+::askfirst:/sbin/getty -L $dev $baud $mode
+
+::ctrlaltdel:/sbin/reboot
+::shutdown:/sbin/swapoff -a
+::shutdown:/bin/umount -a -r
+::restart:/sbin/init
+EOF
+}
+
+install_init_symlink(){
+ local initdir="$(dirname "$1")"
+ local initname="$(basename "$1")"
+ mkdir -p "$initdir"
+ cd "$initdir"
+ for busybox_dir in . ../bin ../sbin ../usr/bin ../usr/sbin; do
+ local busybox="$busybox_dir/busybox"
+ if [ ! -x "$busybox" ]; then
+ continue
+ fi
+ ln -sf "$busybox" "$initname"
+ return 0
+ done
+ echo Unable to find busybox >&2
+ exit 1
+}
+
+install_mdev_config "$1/etc/mdev.conf"
+
+install_start_script "$1$RUN_SCRIPT"
+
+install_inittab "$1/etc/inittab" "${CONSOLE_DEV-ttyS0}" \
+ "${CONSOLE_BAUD-115200}" "${CONSOLE_MODE-vt100}"
+
+install_init_symlink "$1$INIT_SCRIPT"
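A minimal usage sketch, with hypothetical values: the extension does nothing unless INIT_SYSTEM=busybox, and the console settings fall back to ttyS0, 115200 and vt100 when unset:

    INIT_SYSTEM=busybox CONSOLE_DEV=ttyAMA0 CONSOLE_BAUD=38400 \
        sh extensions/busybox-init.configure /mnt/target-root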
diff --git a/extensions/ceph.configure b/extensions/ceph.configure
new file mode 100644
index 00000000..c3cd92d1
--- /dev/null
+++ b/extensions/ceph.configure
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+# Copyright (C) 2013 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import cliapp
+import sys
+import os
+import subprocess
+import shutil
+import re
+import stat
+
+systemd_monitor_template = """
+[Unit]
+Description=Ceph Monitor firstboot setup
+After=network-online.target
+
+[Service]
+ExecStart=/bin/bash -c "/root/setup-ceph-head | tee /root/monitor-setuplog"
+ExecStartPost=/bin/rm /etc/systemd/system/multi-user.target.wants/ceph-monitor-fboot.service
+
+[Install]
+WantedBy=multi-user.target
+"""
+
+systemd_monitor_fname_template = "ceph-monitor-fboot.service"
+
+systemd_osd_template = """
+[Unit]
+Description=Ceph osd firstboot setup
+After=network-online.target
+
+[Service]
+ExecStart=/bin/bash -c "/root/setup-ceph-node | tee /root/storage-setuplog"
+ExecStartPost=/bin/rm /etc/systemd/system/multi-user.target.wants/ceph-storage-fboot.service
+
+[Install]
+WantedBy=multi-user.target
+"""
+systemd_osd_fname_template = "ceph-storage-fboot.service"
+
+ceph_monitor_config_template = """#!/bin/bash
+ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
+ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'
+ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
+monmaptool --create --add 0 10.0.100.2 --fsid 9ceb9257-7541-4de4-b34b-586079986700 /tmp/monmap
+mkdir /var/lib/ceph/mon/ceph-0
+ceph-mon --mkfs -i 0 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
+/etc/init.d/ceph start mon.0
+touch ~/monitor-configured
+"""
+
+ceph_storage_config_template = """#!/bin/bash
+scp 10.0.100.2:/var/lib/ceph/bootstrap-osd/ceph.keyring /var/lib/ceph/bootstrap-osd/
+echo -e "n\np\n1\n\n\nw\n" | fdisk /dev/sdb
+ceph-disk prepare --cluster ceph --cluster-uuid 9ceb9257-7541-4de4-b34b-586079986700 --fs-type ext4 /dev/sdb1
+sudo ceph-disk activate /dev/sdb1
+/etc/init.d/ceph start osd.0
+touch ~/storage-configured
+"""
+
+executable_file_permissions = stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR | \
+ stat.S_IXGRP | stat.S_IRGRP | \
+ stat.S_IXOTH | stat.S_IROTH
+
+class CephConfigurationExtension(cliapp.Application):
+ """
+ Set up ceph server daemons.
+
+ Must include the following environment variables:
+
+    HOSTNAME - Must be defined; it is used as the ID for
+ the monitor and metadata daemons.
+    CEPH_CONF - Provide a ceph configuration file.
+    CEPH_CLUSTER - Cluster name; the extension exits with an error if this
+                   is not set, so there is no implicit 'ceph' default.
+
+    Optional environment variables:
+
+ CEPH_BOOTSTRAP_OSD - Registered key capable of generating OSD
+ keys.
+ CEPH_BOOTSTRAP_MDS - Registered key capable of generating MDS
+ keys.
+
+ Bootstrap keys are required for creating OSD daemons on servers
+ that do not have a running monitor daemon. They are gathered
+ by 'ceph-deploy gatherkeys' but can be generated and registered
+ separately.
+
+ CEPH_MON - (Blank) Create a ceph monitor daemon on the image.
+ CEPH_MON_KEYRING - Location of monitor keyring. Required by the
+ monitor if using cephx authentication.
+
+    CEPH_OSD_X_DATA_DIR - Location of the data directory for an OSD.
+            Creates an OSD daemon on the image. 'X' is an integer
+            id; many OSD daemons may run on the same server.
+
+    CEPH_MDS - (Blank) Create a metadata server daemon on the server.
+ """
+
+ def process_args(self, args):
+
+ if "HOSTNAME" not in os.environ:
+ print "ERROR: Need a hostname defined by 'HOSTNAME'"
+ sys.exit(1)
+ if "CEPH_CLUSTER" not in os.environ:
+ print "ERROR: Need a cluster name defined by 'CEPH_CLUSTER'"
+ sys.exit(1)
+ if "CEPH_CONF" not in os.environ:
+ print "ERROR: Need a ceph conf file defined by 'CEPH_CONF'"
+ sys.exit(1)
+
+ self.dest_dir = args[0]
+
+ self.cluster_name = os.environ["CEPH_CLUSTER"]
+ self.hostname = os.environ["HOSTNAME"]
+
+ self.conf_file = "/etc/ceph/{}.conf".format(self.cluster_name)
+ self.mon_dir = "/var/lib/ceph/mon/"
+ self.osd_dir = "/var/lib/ceph/osd/"
+ self.mds_dir = "/var/lib/ceph/mds/"
+ self.tmp_dir = "/var/lib/ceph/tmp/"
+ self.bootstrap_mds_dir = "/var/lib/ceph/bootstrap-mds/"
+ self.bootstrap_osd_dir = "/var/lib/ceph/bootstrap-osd/"
+ self.systemd_dir = "/etc/systemd/system/"
+ self.systemd_multiuser_dir = "/etc/systemd/system/multi-user.target.wants/"
+
+ self.copy_to_img(os.environ["CEPH_CONF"], self.conf_file)
+
+ # Copy over bootstrap keyrings
+ if "CEPH_BOOTSTRAP_OSD" in os.environ:
+ self.copy_bootstrap_osd(os.environ["CEPH_BOOTSTRAP_OSD"]);
+ if "CEPH_BOOTSTRAP_MDS" in os.environ:
+ self.copy_bootstrap_mds(os.environ["CEPH_BOOTSTRAP_MDS"]);
+
+ # Configure any monitor daemons
+ if "CEPH_MON" in os.environ:
+ self.create_mon_data_dir(os.environ.get("CEPH_MON_KEYRING"))
+ else:
+ self.create_osd_startup_script("None", "None")
+
+ # Configure any object storage daemons
+ osd_re = r"CEPH_OSD_(\d+)_DATA_DIR$"
+
+ for env in os.environ.keys():
+ match = re.match(osd_re, env)
+ if match:
+ osd_data_dir_env = match.group(0)
+ osd_id = match.group(1)
+
+ self.create_osd_data_dir(osd_id, os.environ.get(osd_data_dir_env))
+
+ # Configure any mds daemons
+ if "CEPH_MDS" in os.environ:
+ self.create_mds_data_dir()
+
+ # Create a fake 'partprobe'
+ fake_partprobe_filename = self.dest_dir + "/sbin/partprobe"
+ fake_partprobe = open(fake_partprobe_filename, 'w')
+ fake_partprobe.write("#!/bin/bash\nexit 0;\n")
+ fake_partprobe.close()
+ os.chmod(fake_partprobe_filename, executable_file_permissions)
+ self.create_startup_scripts()
+
+ def copy_to_img(self, src_file, dest_file):
+ shutil.copy(src_file, self.dest_dir + dest_file)
+
+ def copy_bootstrap_osd(self, src_file):
+ self.copy_to_img(src_file,
+ os.path.join(self.bootstrap_osd_dir, "{}.keyring".format(self.cluster_name)))
+
+ def copy_bootstrap_mds(self, src_file):
+ self.copy_to_img(src_file,
+ os.path.join(self.bootstrap_mds_dir, "{}.keyring".format(self.cluster_name)))
+
+ def symlink_to_multiuser(self, fname):
+ print >> sys.stderr, os.path.join("../", fname)
+ print >> sys.stderr, self.dest_dir + os.path.join(self.systemd_multiuser_dir, fname)
+ os.symlink(os.path.join("../", fname),
+ self.dest_dir + os.path.join(self.systemd_multiuser_dir, fname))
+
+ def create_mon_data_dir(self, src_keyring):
+
+ #Create the monitor data directory
+ mon_data_dir = os.path.join(self.mon_dir, "{}-{}".format(self.cluster_name, self.hostname))
+ os.makedirs(self.dest_dir + mon_data_dir)
+
+ #Create sysvinit file to start via sysvinit
+ sysvinit_file = os.path.join(mon_data_dir, "sysvinit")
+ open(self.dest_dir + sysvinit_file, 'a').close()
+
+ #Create systemd file to initialize the monitor data directory
+ keyring = ""
+ if src_keyring:
+ #Copy the keyring from local to the image
+ dest_keyring = os.path.join(self.tmp_dir,
+ "{}-{}.mon.keyring".format(self.cluster_name, self.hostname))
+ self.copy_to_img(src_keyring, dest_keyring)
+ keyring = "--keyring " + dest_keyring
+
+ mon_systemd_fname = systemd_monitor_fname_template
+ systemd_script_name = self.dest_dir + os.path.join(self.systemd_dir, mon_systemd_fname)
+ mon_systemd = open(systemd_script_name, 'w')
+ mon_systemd.write(systemd_monitor_template)
+ mon_systemd.close()
+ #Create a symlink to the multi user target
+ self.symlink_to_multiuser(mon_systemd_fname)
+
+ def create_osd_data_dir(self, osd_id, data_dir):
+ if not data_dir:
+ data_dir = '/srv/osd' + osd_id
+
+ #Create the osd data dir
+ os.makedirs(self.dest_dir + data_dir)
+
+ def create_osd_startup_script(self, osd_id, data_dir):
+ osd_systemd_fname = systemd_osd_fname_template
+ osd_full_name = self.dest_dir + os.path.join(self.systemd_dir, osd_systemd_fname)
+
+ osd_systemd = open(osd_full_name, 'w')
+
+ osd_systemd.write(systemd_osd_template)
+ osd_systemd.close()
+
+ #Create a symlink to the multi user target
+ self.symlink_to_multiuser(osd_systemd_fname)
+
+ def create_mds_data_dir(self):
+
+ #Create the monitor data directory
+ mds_data_dir = os.path.join(self.mds_dir, "{}-{}".format(self.cluster_name, self.hostname))
+ os.makedirs(self.dest_dir + mds_data_dir)
+
+ #Create sysvinit file to start via sysvinit
+ sysvinit_file = os.path.join(mds_data_dir, "sysvinit")
+ open(self.dest_dir + sysvinit_file, 'a').close()
+
+
+ def create_startup_scripts(self):
+ head_setup_file = os.path.join(self.dest_dir, "root", "setup-ceph-head")
+
+ ceph_head_setup = open(head_setup_file, "w")
+ ceph_head_setup.write(ceph_monitor_config_template)
+ ceph_head_setup.close()
+ os.chmod(head_setup_file, executable_file_permissions)
+
+ osd_setup_file = os.path.join(self.dest_dir, "root", "setup-ceph-node")
+ ceph_node_setup = open(osd_setup_file, "w")
+ ceph_node_setup.write(ceph_storage_config_template)
+ ceph_node_setup.close()
+ os.chmod(osd_setup_file, executable_file_permissions)
+
+
+CephConfigurationExtension().run()
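A minimal usage sketch, with hypothetical values and assuming the cliapp library is available; CEPH_MON is set but left blank to request a monitor daemon:

    HOSTNAME=ceph-mon-0 CEPH_CLUSTER=ceph CEPH_CONF=/src/ceph.conf CEPH_MON= \
        python extensions/ceph.configure /mnt/target-root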
diff --git a/extensions/cloud-init.configure b/extensions/cloud-init.configure
new file mode 100755
index 00000000..aa83e0e2
--- /dev/null
+++ b/extensions/cloud-init.configure
@@ -0,0 +1,63 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+#
+# This is a "morph deploy" configuration extension to enable the
+# cloud-init services.
+set -e
+
+ROOT="$1"
+
+##########################################################################
+
+set -e
+
+case "$CLOUD_INIT" in
+''|False|no)
+ exit 0
+ ;;
+True|yes)
+ echo "Configuring cloud-init"
+ ;;
+*)
+    echo Unrecognised value "$CLOUD_INIT" for CLOUD_INIT
+ exit 1
+ ;;
+esac
+
+
+cloud_init_services="cloud-config.service
+ cloud-init-local.service
+ cloud-init.service
+ cloud-final.service"
+
+# Iterate over the cloud-init services and enable them by creating a link
+# in /etc/systemd/system/multi-user.target.wants.
+# If any of the services to link is missing, fail.
+
+services_folder="lib/systemd/system"
+for service_name in $cloud_init_services; do
+ if [ ! -f "$ROOT/$services_folder/$service_name" ]; then
+ echo "ERROR: Service $service_name is missing." >&2
+ echo "Failed to configure cloud-init."
+ exit 1
+ else
+ echo Enabling systemd service "$service_name" >"$MORPH_LOG_FD"
+ ln -sf "/$services_folder/$service_name" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$service_name"
+ fi
+done
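A minimal usage sketch, with a hypothetical target root: CLOUD_INIT accepts yes/True to enable the services and no/False or empty to skip them; MORPH_LOG_FD is normally supplied by morph itself during a real deployment:

    CLOUD_INIT=yes sh extensions/cloud-init.configure /mnt/target-root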
diff --git a/extensions/distbuild-trove-nfsboot.check b/extensions/distbuild-trove-nfsboot.check
new file mode 100755
index 00000000..38c491e5
--- /dev/null
+++ b/extensions/distbuild-trove-nfsboot.check
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''Preparatory checks for Morph 'distbuild-trove-nfsboot' write extension'''
+
+import cliapp
+import logging
+import os
+
+import morphlib.writeexts
+
+
+class DistbuildTroveNFSBootCheckExtension(morphlib.writeexts.WriteExtension):
+
+ nfsboot_root = '/srv/nfsboot'
+ remote_user = 'root'
+
+ required_vars = [
+ 'DISTBUILD_CONTROLLER',
+ 'DISTBUILD_GIT_SERVER',
+ 'DISTBUILD_SHARED_ARTIFACT_CACHE',
+ 'DISTBUILD_TROVE_ID',
+ 'DISTBUILD_WORKERS',
+ 'DISTBUILD_WORKER_SSH_KEY',
+ ]
+
+ def system_path(self, system_name, version_label=None):
+ if version_label:
+ return os.path.join(self.nfsboot_root, system_name, 'systems',
+ version_label, 'run')
+ else:
+ return os.path.join(self.nfsboot_root, system_name)
+
+ def process_args(self, args):
+ if len(args) != 1:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ nfs_host = args[0]
+ nfs_netloc = '%s@%s' % (self.remote_user, nfs_host)
+
+ version_label = os.getenv('VERSION_LABEL', 'factory')
+
+ missing_vars = [var for var in self.required_vars
+ if not var in os.environ]
+ if missing_vars:
+ raise cliapp.AppException(
+ 'Please set: %s' % ', '.join(missing_vars))
+
+ controllers = os.getenv('DISTBUILD_CONTROLLER').split()
+ workers = os.getenv('DISTBUILD_WORKERS').split()
+
+ if len(controllers) != 1:
+ raise cliapp.AppException('Please specify exactly one controller.')
+
+ if len(workers) == 0:
+ raise cliapp.AppException('Please specify at least one worker.')
+
+ upgrade = self.get_environment_boolean('UPGRADE')
+
+ self.check_good_server(nfs_netloc)
+
+ system_names = set(controllers + workers)
+ for system_name in system_names:
+ if upgrade:
+ self.check_upgradeable(nfs_netloc, system_name, version_label)
+ else:
+ system_path = self.system_path(system_name)
+
+ if self.remote_directory_exists(nfs_netloc, system_path):
+ if self.get_environment_boolean('OVERWRITE') == False:
+ raise cliapp.AppException(
+ 'System %s already exists at %s:%s. Try `morph '
+ 'upgrade` instead of `morph deploy`.' % (
+ system_name, nfs_netloc, system_path))
+
+ def check_good_server(self, netloc):
+ # FIXME: assumes root
+ self.check_ssh_connectivity(netloc.split('@')[-1])
+
+ # Is an NFS server
+ try:
+ cliapp.ssh_runcmd(
+ netloc, ['test', '-e', '/etc/exports'])
+ except cliapp.AppException:
+ raise cliapp.AppException('server %s is not an nfs server'
+ % netloc)
+ try:
+ cliapp.ssh_runcmd(
+ netloc, ['systemctl', 'is-enabled', 'nfs-server.service'])
+
+ except cliapp.AppException:
+ raise cliapp.AppException('server %s does not control its '
+ 'nfs server by systemd' % netloc)
+
+ # TFTP server exports /srv/nfsboot/tftp
+ tftp_root = os.path.join(self.nfsboot_root, 'tftp')
+ try:
+ cliapp.ssh_runcmd(
+ netloc, ['test' , '-d', tftp_root])
+ except cliapp.AppException:
+ raise cliapp.AppException('server %s does not export %s' %
+ (netloc, tftp_root))
+
+ def check_upgradeable(self, nfs_netloc, system_name, version_label):
+ '''Check that there is already a version of the system present.
+
+ Distbuild nodes are stateless, so an upgrade is actually pretty much
+ the same as an initial deployment. This test is just a sanity check.
+
+ '''
+ system_path = self.system_path(system_name)
+ system_version_path = self.system_path(system_name, version_label)
+
+ if not self.remote_directory_exists(nfs_netloc, system_path):
+ raise cliapp.AppException(
+ 'System %s not found at %s:%s, cannot deploy an upgrade.' % (
+ system_name, nfs_netloc, system_path))
+
+ if self.remote_directory_exists(nfs_netloc, system_version_path):
+ if self.get_environment_boolean('OVERWRITE'):
+ pass
+ else:
+ raise cliapp.AppException(
+ 'System %s version %s already exists at %s:%s.' % (
+ system_name, version_label, nfs_netloc,
+ system_version_path))
+
+ def remote_directory_exists(self, nfs_netloc, path):
+ try:
+ cliapp.ssh_runcmd(nfs_netloc, ['test', '-d', path])
+ except cliapp.AppException as e:
+ logging.debug('SSH exception: %s', e)
+ return False
+
+ return True
+
+
+DistbuildTroveNFSBootCheckExtension().run()
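A minimal usage sketch, with hypothetical hostnames and paths: all six required variables must be set, and the single positional argument is the NFS/Trove host to check:

    DISTBUILD_CONTROLLER=controller1 \
    DISTBUILD_WORKERS='worker1 worker2' \
    DISTBUILD_GIT_SERVER=trove.example.com \
    DISTBUILD_SHARED_ARTIFACT_CACHE=trove.example.com \
    DISTBUILD_TROVE_ID=example \
    DISTBUILD_WORKER_SSH_KEY=/keys/worker.key \
        python extensions/distbuild-trove-nfsboot.check trove.example.com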
diff --git a/extensions/distbuild-trove-nfsboot.write b/extensions/distbuild-trove-nfsboot.write
new file mode 100755
index 00000000..a5a5b094
--- /dev/null
+++ b/extensions/distbuild-trove-nfsboot.write
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+# Copyright (C) 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''Morph .write extension for a distbuild network booting off a Trove with NFS.
+
+'''
+
+
+import os
+import sys
+import tempfile
+
+import cliapp
+import morphlib.writeexts
+
+
+class DistbuildTroveNFSBootWriteExtension(morphlib.writeexts.WriteExtension):
+
+ '''Create an NFS root and kernel on TFTP during Morph's deployment.
+
+ See distbuild-trove-nfsboot.help for documentation.
+
+ '''
+
+ nfsboot_root = '/srv/nfsboot'
+ remote_user = 'root'
+
+ def system_path(self, system_name, version_label=None):
+ if version_label:
+ # The 'run' directory is kind of a historical artifact. Baserock
+ # systems that have Btrfs root disks maintain an orig/ and a run/
+ # subvolume, so that one can find changes that have been made at
+ # runtime. For distbuild systems, this isn't necessary because the
+ # root filesystems of the nodes are effectively stateless. However,
+ # existing systems have bootloaders configured to look for the
+ # 'run' directory, so we need to keep creating it.
+ return os.path.join(self.nfsboot_root, system_name, 'systems',
+ version_label, 'run')
+ else:
+ return os.path.join(self.nfsboot_root, system_name)
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ local_system_path, nfs_host = args
+
+ nfs_netloc = '%s@%s' % (self.remote_user, nfs_host)
+
+ version_label = os.getenv('VERSION_LABEL', 'factory')
+
+ controller_name = os.getenv('DISTBUILD_CONTROLLER')
+ worker_names = os.getenv('DISTBUILD_WORKERS').split()
+ system_names = set([controller_name] + worker_names)
+
+ git_server = os.getenv('DISTBUILD_GIT_SERVER')
+ shared_artifact_cache = os.getenv('DISTBUILD_SHARED_ARTIFACT_CACHE')
+ trove_id = os.getenv('DISTBUILD_TROVE_ID')
+ worker_ssh_key_path = os.getenv('DISTBUILD_WORKER_SSH_KEY')
+
+ host_map = self.parse_host_map_string(os.getenv('HOST_MAP', ''))
+
+ kernel_relpath = self.find_kernel(local_system_path)
+
+ copied_rootfs = None
+ for system_name in system_names:
+ remote_system_path = self.system_path(system_name, version_label)
+ if copied_rootfs is None:
+ self.transfer_system(
+ nfs_netloc, local_system_path, remote_system_path)
+ copied_rootfs = remote_system_path
+ else:
+ self.duplicate_remote_system(
+ nfs_netloc, copied_rootfs, remote_system_path)
+
+ for system_name in system_names:
+ remote_system_path = self.system_path(system_name, version_label)
+ self.link_kernel_to_tftpboot_path(
+ nfs_netloc, system_name, version_label, kernel_relpath)
+ self.set_hostname(
+ nfs_netloc, system_name, remote_system_path)
+ self.write_distbuild_config(
+ nfs_netloc, system_name, remote_system_path, git_server,
+ shared_artifact_cache, trove_id, worker_ssh_key_path,
+ controller_name, worker_names, host_map=host_map)
+
+ self.configure_nfs_exports(nfs_netloc, system_names)
+
+ for system_name in system_names:
+ self.update_default_version(nfs_netloc, system_name, version_label)
+
+ def parse_host_map_string(self, host_map_string):
+ '''Parse the HOST_MAP variable
+
+ Returns a dict mapping hostname to value (where value is an IP
+ address, a fully-qualified domain name, an alternate hostname, or
+ whatever).
+
+ '''
+ pairs = host_map_string.split(' ')
+ return morphlib.util.parse_environment_pairs({}, pairs)
+
+ def transfer_system(self, nfs_netloc, local_system_path,
+ remote_system_path):
+ self.status(msg='Copying rootfs to %(nfs_netloc)s',
+ nfs_netloc=nfs_netloc)
+ cliapp.ssh_runcmd(
+ nfs_netloc, ['mkdir', '-p', remote_system_path])
+ # The deployed rootfs may have been created by OSTree, so definitely
+ # don't pass --hard-links to `rsync`.
+ cliapp.runcmd(
+ ['rsync', '--archive', '--delete', '--info=progress2',
+ '--protect-args', '--partial', '--sparse', '--xattrs',
+ local_system_path + '/',
+ '%s:%s' % (nfs_netloc, remote_system_path)], stdout=sys.stdout)
+
+ def duplicate_remote_system(self, nfs_netloc, source_system_path,
+ target_system_path):
+ self.status(msg='Duplicating rootfs to %(target_system_path)s',
+ target_system_path=target_system_path)
+ cliapp.ssh_runcmd(nfs_netloc,
+ ['mkdir', '-p', target_system_path])
+ # We can't pass --info=progress2 here, because it may not be available
+ # in the remote 'rsync'. The --info setting was added in RSync 3.1.0,
+ # old versions of Baserock have RSync 3.0.9. So the user doesn't get
+ # any progress info on stdout for the 'duplicate' stage.
+ cliapp.ssh_runcmd(nfs_netloc,
+ ['rsync', '--archive', '--delete', '--protect-args', '--partial',
+ '--sparse', '--xattrs', source_system_path + '/',
+ target_system_path], stdout=sys.stdout)
+
+ def find_kernel(self, local_system_path):
+ bootdir = os.path.join(local_system_path, 'boot')
+ image_names = ['vmlinuz', 'zImage', 'uImage']
+
+ for name in image_names:
+ try_path = os.path.join(bootdir, name)
+ if os.path.exists(try_path):
+ kernel_path = os.path.relpath(try_path, local_system_path)
+ break
+ else:
+ raise cliapp.AppException(
+ 'Could not find a kernel in the system: none of '
+ '%s found' % ', '.join(image_names))
+ return kernel_path
+
+ def link_kernel_to_tftpboot_path(self, nfs_netloc, system_name,
+ version_label, kernel_relpath):
+ '''Create links for TFTP server for a system's kernel.'''
+
+ remote_system_path = self.system_path(system_name, version_label)
+ kernel_dest = os.path.join(remote_system_path, kernel_relpath)
+
+ self.status(msg='Creating links to %(name)s kernel in tftp directory',
+ name=system_name)
+ tftp_dir = os.path.join(self.nfsboot_root , 'tftp')
+
+ versioned_kernel_name = "%s-%s" % (system_name, version_label)
+ kernel_name = system_name
+
+ cliapp.ssh_runcmd(nfs_netloc,
+ ['ln', '-f', kernel_dest,
+ os.path.join(tftp_dir, versioned_kernel_name)])
+
+ cliapp.ssh_runcmd(nfs_netloc,
+ ['ln', '-sf', versioned_kernel_name,
+ os.path.join(tftp_dir, kernel_name)])
+
+ def set_remote_file_contents(self, nfs_netloc, path, text):
+ with tempfile.NamedTemporaryFile() as f:
+ f.write(text)
+ f.flush()
+ cliapp.runcmd(
+ ['scp', f.name, '%s:%s' % (nfs_netloc, path)])
+
+ def set_hostname(self, nfs_netloc, system_name, system_path):
+ hostname_path = os.path.join(system_path, 'etc', 'hostname')
+ self.set_remote_file_contents(
+ nfs_netloc, hostname_path, system_name + '\n')
+
+ def write_distbuild_config(self, nfs_netloc, system_name, system_path,
+ git_server, shared_artifact_cache, trove_id,
+ worker_ssh_key_path, controller_name,
+ worker_names, host_map = {}):
+ '''Write /etc/distbuild/distbuild.conf on the node.
+
+ This .write extension takes advantage of the 'generic' mode of
+ distbuild.configure. Each node is not configured until first-boot,
+ when distbuild-setup.service runs and configures the node based on the
+ contents of /etc/distbuild/distbuild.conf.
+
+ '''
+ def host(hostname):
+ return host_map.get(hostname, hostname)
+
+ config = {
+ 'ARTIFACT_CACHE_SERVER': host(shared_artifact_cache),
+ 'CONTROLLERHOST': host(controller_name),
+ 'TROVE_HOST': host(git_server),
+ 'TROVE_ID': trove_id,
+ 'DISTBUILD_CONTROLLER': system_name == controller_name,
+ 'DISTBUILD_WORKER': system_name in worker_names,
+ 'WORKERS': ', '.join(map(host, worker_names)),
+ 'WORKER_SSH_KEY': '/etc/distbuild/worker.key',
+ }
+
+ config_text = '\n'.join(
+ '%s: %s' % (key, value) for key, value in config.iteritems())
+ config_text = \
+ '# Generated by distbuild-trove-nfsboot.write\n' + \
+ config_text + '\n'
+ path = os.path.join(system_path, 'etc', 'distbuild')
+ cliapp.ssh_runcmd(
+ nfs_netloc, ['mkdir', '-p', path])
+ cliapp.runcmd(
+ ['scp', worker_ssh_key_path, '%s:%s' % (nfs_netloc, path)])
+ self.set_remote_file_contents(
+ nfs_netloc, os.path.join(path, 'distbuild.conf'), config_text)
+
+ def configure_nfs_exports(self, nfs_netloc, system_names):
+ '''Ensure the Trove is set up to export the NFS roots we need.
+
+ This doesn't handle setting up the TFTP daemon. We assume that is
+ already running.
+
+ '''
+ for system_name in system_names:
+ exported_path = self.system_path(system_name)
+ exports_path = '/etc/exports'
+
+ # Rather ugly SSH hackery follows to ensure each system path is
+ # listed in /etc/exports.
+ try:
+ cliapp.ssh_runcmd(
+ nfs_netloc, ['grep', '-q', exported_path, exports_path])
+ except cliapp.AppException:
+ ip_mask = '*'
+ options = 'rw,no_subtree_check,no_root_squash,async'
+ exports_string = '%s %s(%s)\n' % (exported_path, ip_mask,
+ options)
+ exports_append_sh = '''\
+ set -eu
+ target="$1"
+ temp=$(mktemp)
+ cat "$target" > "$temp"
+ cat >> "$temp"
+ mv "$temp" "$target"
+ '''
+ cliapp.ssh_runcmd(
+ nfs_netloc,
+ ['sh', '-c', exports_append_sh, '--', exports_path],
+ feed_stdin=exports_string)
+
+ cliapp.ssh_runcmd(nfs_netloc,
+ ['systemctl', 'restart', 'nfs-server.service'])
+
+ def update_default_version(self, remote_netloc, system_name,
+ version_label):
+ self.status(msg='Linking \'default\' to %(version)s for %(system)s',
+ version=version_label, system=system_name)
+ system_path = self.system_path(system_name)
+ system_version_path = os.path.join(system_path, 'systems',
+ version_label)
+ default_path = os.path.join(system_path, 'systems', 'default')
+
+ cliapp.ssh_runcmd(remote_netloc,
+ ['ln', '-sfn', system_version_path, default_path])
+
+
+DistbuildTroveNFSBootWriteExtension().run()
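A minimal usage sketch, with hypothetical paths: the same DISTBUILD_* variables as for the .check extension above must be exported, the positional arguments are the unpacked system and the NFS host, and VERSION_LABEL is optional (defaulting to 'factory'):

    VERSION_LABEL=factory \
        python extensions/distbuild-trove-nfsboot.write \
            /tmp/unpacked-system trove.example.com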
diff --git a/extensions/distbuild-trove-nfsboot.write.help b/extensions/distbuild-trove-nfsboot.write.help
new file mode 100644
index 00000000..62f1455c
--- /dev/null
+++ b/extensions/distbuild-trove-nfsboot.write.help
@@ -0,0 +1,49 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+ Deploy a distbuild network, using a Trove to serve the kernel and rootfs.
+
+ The `location` argument is the hostname of the Trove system.
+
+ The following configuration values must be specified:
+
+ - DISTBUILD_CONTROLLER: hostname of controller system
+ - DISTBUILD_WORKERS: hostnames of each worker system
+ - DISTBUILD_GIT_SERVER: Trove hostname
+ - DISTBUILD_SHARED_ARTIFACT_CACHE: Trove hostname
+ - DISTBUILD_TROVE_ID: Trove ID
+ - DISTBUILD_WORKER_SSH_KEY: SSH key to be used for ssh:// repos
+
+ A note on TROVE_ID: the current distbuild-setup service requires that
+ a single 'Trove ID' is specified. This is used in Morph for expanding
+ keyed URLs. If you set TROVE_ID=foo for example, foo:bar will be expanded
+ to git://$GIT_SERVER/foo, in addition to the standard baserock: and
+ upstream: prefixes that you can use.
+
+ The WORKER_SSH_KEY must be provided, even if you don't need it. The
+ distbuild-setup service could be changed to make it optional.
+
+ The following configuration values are optional:
+
+ - HOST_MAP: a list of key=value pairs mapping hostnames to IP addresses,
+ or fully-qualified domain names. Useful if you
+    cannot rely on hostname resolution working for your deployment.
+
+ The extension will connect to root@location via ssh to copy the kernel and
+ rootfs, and configure the nfs server. It will duplicate the kernel and
+ rootfs once for each node in the distbuild network.
+
+ The deployment mechanism makes assumptions about the bootloader
+ configuration of the target machines.
diff --git a/extensions/distbuild.configure b/extensions/distbuild.configure
new file mode 100644
index 00000000..062aaecc
--- /dev/null
+++ b/extensions/distbuild.configure
@@ -0,0 +1,132 @@
+#!/bin/sh
+# Copyright (C) 2013-2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configure extension to configure a Baserock
+# build node, as part of a distributed building cluster. It uses the
+# following variables from the environment:
+#
+# * DISTBUILD_CONTROLLER: if 'yes', machine is set up as the controller.
+# * DISTBUILD_WORKER: if 'yes', machine is set up as a worker.
+# * TROVE_ID: hostname and Trove prefix of the server to pull source
+# from and push built artifacts to.
+# * TROVE_HOST: FQDN of the same server as in TROVE_ID
+#
+# The following variable is optional:
+#
+# * ARTIFACT_CACHE_SERVER: by default artifacts are pushed to the same
+# Trove that served the source, but you can use a different one.
+#
+# The following variable is required for worker nodes only:
+#
+# * CONTROLLERHOST: hostname or IP address of distbuild controller machine.
+# * WORKER_SSH_KEY: identity used to authenticate with Trove
+#
+# The following variable is required for the controller node only:
+#
+# * WORKERS: hostnames or IP address of worker nodes, comma-separated.
+
+set -e
+
+if [ -n "$DISTBUILD_GENERIC" ]; then
+ echo "Not configuring the distbuild node, it will be generic"
+ exit 0
+fi
+
+# Set default values for these two options if they are unset, so that if the
+# user specifies no distbuild config at all the configure extension exits
+# without doing anything, rather than raising an error.
+DISTBUILD_CONTROLLER=${DISTBUILD_CONTROLLER-False}
+DISTBUILD_WORKER=${DISTBUILD_WORKER-False}
+
+if [ "$DISTBUILD_CONTROLLER" = False -a "$DISTBUILD_WORKER" = False ]; then
+ exit 0
+fi
+
+set -u
+
+# Check that all the variables needed are present:
+
+error_vars=false
+
+if [ "x$TROVE_HOST" = "x" ]; then
+ echo "ERROR: TROVE_HOST needs to be defined."
+ error_vars=true
+fi
+
+if [ "x$TROVE_ID" = "x" ]; then
+ echo "ERROR: TROVE_ID needs to be defined."
+ error_vars=true
+fi
+
+if [ "$DISTBUILD_WORKER" = True ]; then
+ if ! ssh-keygen -lf "$WORKER_SSH_KEY" > /dev/null 2>&1; then
+        echo "ERROR: WORKER_SSH_KEY is not a valid ssh key."
+ error_vars=true
+ fi
+
+ if [ "x$CONTROLLERHOST" = "x" ]; then
+ echo "ERROR: CONTROLLERHOST needs to be defined."
+ error_vars=true
+ fi
+fi
+
+if [ "$DISTBUILD_CONTROLLER" = True ]; then
+ if [ "x$WORKERS" = "x" ]; then
+ echo "ERROR: WORKERS needs to be defined."
+ error_vars=true
+ fi
+fi
+
+if "$error_vars"; then
+ exit 1
+fi
+
+
+ROOT="$1"
+
+DISTBUILD_DATA="$ROOT/etc/distbuild"
+mkdir -p "$DISTBUILD_DATA"
+
+# If it's a worker, install the worker ssh key.
+if [ "$DISTBUILD_WORKER" = True ]
+then
+ install -m 0644 "$WORKER_SSH_KEY" "$DISTBUILD_DATA/worker.key"
+fi
+
+
+
+# Create the configuration file
+python <<'EOF' >"$DISTBUILD_DATA/distbuild.conf"
+import os, sys, yaml
+
+distbuild_configuration={
+ 'TROVE_ID': os.environ['TROVE_ID'],
+ 'TROVE_HOST': os.environ['TROVE_HOST'],
+ 'DISTBUILD_WORKER': os.environ['DISTBUILD_WORKER'],
+ 'DISTBUILD_CONTROLLER': os.environ['DISTBUILD_CONTROLLER'],
+ 'WORKER_SSH_KEY': '/etc/distbuild/worker.key',
+}
+
+
+optional_keys = ('ARTIFACT_CACHE_SERVER', 'CONTROLLERHOST', 'WORKERS',
+ 'TROVE_BACKUP_KEYS')
+
+for key in optional_keys:
+ if key in os.environ:
+ distbuild_configuration[key] = os.environ[key]
+
+yaml.dump(distbuild_configuration, sys.stdout, default_flow_style=False)
+EOF
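A minimal usage sketch for a worker node, with hypothetical values; a controller would instead set DISTBUILD_CONTROLLER=True and WORKERS:

    DISTBUILD_WORKER=True DISTBUILD_CONTROLLER=False \
    TROVE_ID=example TROVE_HOST=trove.example.com \
    CONTROLLERHOST=controller1 WORKER_SSH_KEY=/keys/worker.key \
        sh extensions/distbuild.configure /mnt/target-root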
diff --git a/extensions/fstab.configure b/extensions/fstab.configure
new file mode 100755
index 00000000..b9154eee
--- /dev/null
+++ b/extensions/fstab.configure
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright © 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# =*= License: GPL-2 =*=
+
+
+import os
+import sys
+
+import morphlib
+
+envvars = {k: v for (k, v) in os.environ.iteritems() if k.startswith('FSTAB_')}
+
+conf_file = os.path.join(sys.argv[1], 'etc/fstab')
+morphlib.util.write_from_dict(conf_file, envvars)
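A minimal usage sketch, with hypothetical values: each FSTAB_* variable is expected to hold one complete fstab line, which is written into the target's /etc/fstab:

    FSTAB_SRV='/dev/vdb1 /srv ext4 defaults 0 2' \
        python extensions/fstab.configure /mnt/target-root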
diff --git a/extensions/hosts b/extensions/hosts
new file mode 100644
index 00000000..5b97818d
--- /dev/null
+++ b/extensions/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local
diff --git a/extensions/hosts.configure b/extensions/hosts.configure
new file mode 100755
index 00000000..6b068d04
--- /dev/null
+++ b/extensions/hosts.configure
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright © 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# =*= License: GPL-2 =*=
+
+
+import os
+import sys
+import socket
+
+import morphlib
+
+def validate(var, line):
+ xs = line.split()
+ if len(xs) == 0:
+ raise morphlib.Error("`%s: %s': line is empty" % (var, line))
+
+ ip = xs[0]
+ hostnames = xs[1:]
+
+ if len(hostnames) == 0:
+ raise morphlib.Error("`%s: %s': missing hostname" % (var, line))
+
+ family = socket.AF_INET6 if ':' in ip else socket.AF_INET
+
+ try:
+ socket.inet_pton(family, ip)
+ except socket.error:
+ raise morphlib.Error("`%s: %s' invalid ip" % (var, ip))
+
+envvars = {k: v for (k, v) in os.environ.iteritems() if k.startswith('HOSTS_')}
+
+conf_file = os.path.join(sys.argv[1], 'etc/hosts')
+morphlib.util.write_from_dict(conf_file, envvars, validate)
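A minimal usage sketch, with hypothetical values: each HOSTS_* value is validated as 'IP hostname...' and then written into the target's /etc/hosts:

    HOSTS_TROVE='10.0.0.10 trove.example.com trove' \
        python extensions/hosts.configure /mnt/target-root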
diff --git a/extensions/image-package-example/README b/extensions/image-package-example/README
new file mode 100644
index 00000000..c1322f25
--- /dev/null
+++ b/extensions/image-package-example/README
@@ -0,0 +1,9 @@
+Image package example scripts
+=============================
+
+These are scripts used to create disk images or install the system onto
+an existing disk.
+
+This is also implemented independently for the rawdisk.write write
+extension; see morphlib.writeexts.WriteExtension.create_local_system()
+for a similar, python implementation.
diff --git a/extensions/image-package-example/common.sh.in b/extensions/image-package-example/common.sh.in
new file mode 100644
index 00000000..9a7389a7
--- /dev/null
+++ b/extensions/image-package-example/common.sh.in
@@ -0,0 +1,72 @@
+#!/bin/false
+# Script library to be used by disk-install.sh and make-disk-image.sh
+
+status(){
+ echo "$@"
+}
+
+info(){
+ echo "$@" >&2
+}
+
+warn(){
+ echo "$@" >&2
+}
+
+extract_rootfs(){
+ tar -C "$1" -xf @@ROOTFS_TAR_PATH@@ .
+}
+
+make_disk_image(){
+ truncate --size "$1" "$2"
+}
+
+format_disk(){
+ local disk="$1"
+ mkfs.ext4 -F -L rootfs "$disk"
+}
+
+install_fs_config(){
+ local mountpoint="$1"
+ local rootdisk="${2-/dev/vda}"
+ cat >>"$mountpoint/etc/fstab" <<EOF
+$rootdisk / ext4 rw,errors=remount-ro 0 0
+EOF
+ install -D -m 644 /proc/self/fd/0 "$mountpoint/boot/extlinux.conf" <<EOF
+DEFAULT baserock
+LABEL baserock
+SAY Booting Baserock
+LINUX /boot/vmlinuz
+APPEND root=$rootdisk
+EOF
+}
+
+install_bootloader(){
+ local disk="$1"
+ local mountpoint="$2"
+ dd if=@@IMAGE_DIR@@/mbr.bin conv=notrunc bs=440 count=1 of="$disk"
+ extlinux --install "$mountpoint/boot"
+}
+
+loop_file(){
+ losetup --show --find "$1"
+}
+unloop_file(){
+ #losetup --detach "$1"
+ # unlooping handled by umount -d, for busybox compatibility
+ true
+}
+
+temp_mount(){
+ local mp="$(mktemp -d)"
+ if ! mount "$@" "$mp"; then
+ rmdir "$mp"
+ return 1
+ fi
+ echo "$mp"
+}
+untemp_mount(){
+ # Unmount and detach in one step for busybox compatibility
+ umount -d "$1"
+ rmdir "$1"
+}
diff --git a/extensions/image-package-example/disk-install.sh.in b/extensions/image-package-example/disk-install.sh.in
new file mode 100644
index 00000000..bc8e0e67
--- /dev/null
+++ b/extensions/image-package-example/disk-install.sh.in
@@ -0,0 +1,51 @@
+#!/bin/sh
+# Script for writing the system to an existing disk.
+# This formats the disk, extracts the rootfs to it, installs the
+# bootloader, and ensures there's appropriate configuration for the
+# bootloader, kernel and userland to agree what the rootfs is.
+
+set -eu
+
+usage(){
+ cat <<EOF
+usage: $0 DISK [TARGET_DISK]
+
+DISK: Where the disk appears on your development machine
+TARGET_DISK: What the disk will appear as on the target machine
+EOF
+}
+
+. @@SCRIPT_DIR@@/common.sh
+
+if [ "$#" -lt 1 -o "$#" -gt 2 ]; then
+ usage
+ exit 1
+fi
+
+DISK="$1"
+TARGET_DISK="${2-/dev/sda}"
+
+status Formatting "$DISK" as ext4
+format_disk "$DISK"
+(
+ info Mounting "$DISK"
+ MP="$(temp_mount -t ext4 "$DISK")"
+ info Mounted "$DISK" to "$MP"
+ set +e
+ (
+ set -e
+ info Copying rootfs onto disk
+ extract_rootfs "$MP"
+ info Configuring disk paths
+ install_fs_config "$MP" "$TARGET_DISK"
+ info Installing bootloader
+ install_bootloader "$DISK" "$MP"
+ )
+ ret="$?"
+ if [ "$ret" != 0 ]; then
+ warn Filling rootfs failed with "$ret"
+ fi
+ info Unmounting "$DISK" from "$MP" and removing "$MP"
+ untemp_mount "$MP"
+ exit "$ret"
+)
diff --git a/extensions/image-package-example/make-disk-image.sh.in b/extensions/image-package-example/make-disk-image.sh.in
new file mode 100644
index 00000000..61264fa0
--- /dev/null
+++ b/extensions/image-package-example/make-disk-image.sh.in
@@ -0,0 +1,36 @@
+#!/bin/sh
+# Script for writing the system to a disk image file.
+# This creates a file of the right size, attaches it to a loop device,
+# then hands the rest of the work off to disk-install.sh
+
+usage(){
+ cat <<EOF
+usage: $0 FILENAME SIZE [TARGET_DISK]
+
+FILENAME: Location to write the disk image to
+SIZE: Size to create the disk image with
+TARGET_DISK: What the disk will appear as on the target machine
+EOF
+}
+
+. @@SCRIPT_DIR@@/common.sh
+
+if [ "$#" -lt 2 -o "$#" -gt 3 ]; then
+ usage
+ exit 1
+fi
+
+DISK_IMAGE="$1"
+DISK_SIZE="$2"
+TARGET_DISK="${3-/dev/vda}"
+
+make_disk_image "$DISK_SIZE" "$DISK_IMAGE"
+
+(
+ LOOP="$(loop_file "$DISK_IMAGE")"
+ set +e
+    @@SCRIPT_DIR@@/disk-install.sh "$LOOP" "$TARGET_DISK"
+ ret="$?"
+ unloop_file "$LOOP"
+ exit "$ret"
+)
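A usage sketch, with hypothetical file and device names: once image-package.write has filled in the @@...@@ placeholders and packed these scripts into a package, they are run from the unpacked package directory:

    ./tools/make-disk-image.sh baserock.img 4G /dev/vda
    ./tools/disk-install.sh /dev/sdb /dev/sda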
diff --git a/extensions/image-package.write b/extensions/image-package.write
new file mode 100755
index 00000000..15ceadcf
--- /dev/null
+++ b/extensions/image-package.write
@@ -0,0 +1,168 @@
+#!/bin/sh
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# =*= License: GPL-2 =*=
+#
+#
+# This is a write extension for making a package that can be used to
+# install the produced system. Ideally we'd instead have Baserock
+# everywhere to do the deployment, but we need to support this workflow
+# until that is possible.
+#
+# This write extension produces a tarball, which contains:
+# - a tarball of the configured system root file system
+# - any supporting files listed in BOOTLOADER_BLOBS
+# - any supporting scripts, generated from templates listed in
+# INCLUDE_SCRIPTS
+#
+# The extension requires the following environment variables to be set:
+#
+# * BOOTLOADER_BLOBS: files to include besides rootfs tarball,
+# paths are relative to the root of the built rootfs
+# works on any kind of file in the rootfs, named
+# BOOTLOADER_BLOBS since that's the common use-case
+# :-separated by default
+# * INCLUDE_SCRIPTS: script templates that are included in the package
+# after being filled out
+# file paths are relative to the definitions repository
+# :-separated by default
+#
+# The script templates may contain any of the following strings, which
+# will be replaced with a string which will expand to the appropriate
+# value as a shell word:
+# - @@SCRIPT_DIR@@: the path the script files are installed to
+# - @@IMAGE_DIR@@: the path BOOTLOADER_BLOBS are installed to
+# - @@ROOTFS_TAR_PATH@@: path to the rootfs tarball
+#
+# The interpolated strings may run commands dependent on the current
+# working directory, so if `cd` is required, bind these values to a
+# variable beforehand.
+#
+# The following optional variables can be set as well:
+#
+# * INCLUDE_SCRIPTS_SEPARATOR: character to separate INCLUDE_SCRIPTS with (default: :)
+# * BOOTLOADER_BLOBS_SEPARATOR: character to separate BOOTLOADER_BLOBS with (default: :)
+# * SCRIPT_SUBDIR: where in the package processed scripts are installed to (default: tools)
+# * IMAGE_SUBDIR: where in the package BOOTLOADER_BLOBS are copied to (default: image_files)
+# * ROOTFS_TAR: name to call the rootfs tarball inside IMAGE_SUBDIR (default: rootfs.tar)
+# * OUTPUT_COMPRESS: compression used for output tarball (default: none)
+# * ROOTFS_COMPRESS: compression used for rootfs (default: none)
+
+set -eu
+
+die(){
+ echo "$@" >&2
+ exit 1
+}
+
+warn(){
+ echo "$@" >&2
+}
+
+info(){
+ echo "$@" >&2
+}
+
+shellescape(){
+ echo "'$(echo "$1" | sed -e "s/'/'\\''/g")'"
+}
+
+sedescape(){
+ # Escape the passed in string so it can be safely interpolated into
+ # a sed expression as a literal value.
+ echo "$1" | sed -e 's/[\/&]/\\&/g'
+}
+
+ROOTDIR="$1"
+OUTPUT_TAR="$2"
+td="$(mktemp -d)"
+IMAGE_SUBDIR="${IMAGE_SUBDIR-image_files}"
+SCRIPT_SUBDIR="${SCRIPT_SUBDIR-tools}"
+ROOTFS_TAR="${ROOTFS_TAR-rootfs.tar}"
+
+# Generate shell snippets that will expand to paths to various resources
+# needed by the scripts.
+# They expand to a single shell word, so constructs like the following work
+# SCRIPT_DIR=@@SCRIPT_DIR@@
+# dd if="$SCRIPT_DIR/mbr" of="$disk" count=1
+# tar -C "$mountpoint" -xf @@ROOTFS_TAR_PATH@@ .
+find_script_dir='"$(readlink -f "$(dirname "$0")")"'
+image_dir="$find_script_dir/../$(shellescape "$IMAGE_SUBDIR")"
+rootfs_tar_path="$image_dir/$(shellescape "$ROOTFS_TAR")"
+
+install_script(){
+ local source_file="$1"
+ local output_dir="$2"
+ local target_file="$output_dir/$SCRIPT_SUBDIR/$(basename "$source_file" .in)"
+ sed -e "s/@@SCRIPT_DIR@@/$(sedescape "$find_script_dir")/g" \
+ -e "s/@@IMAGE_DIR@@/$(sedescape "$image_dir")/g" \
+ -e "s/@@ROOTFS_TAR_PATH@@/$(sedescape "$rootfs_tar_path")/g" \
+ "$source_file" \
+ | install -D -m 755 /proc/self/fd/0 "$target_file"
+}
+
+install_scripts(){
+ local output_dir="$1"
+ (
+ IFS="${INCLUDE_SCRIPTS_SEPARATOR-:}"
+ for script in $INCLUDE_SCRIPTS; do
+ local script_path="$(pwd)/$script"
+ if [ ! -e "$script_path" ]; then
+ warn Script "$script" not found, ignoring
+ continue
+ fi
+ install_script "$script" "$output_dir"
+ done
+ )
+}
+
+install_bootloader_blobs(){
+ local output_dir="$1"
+ local image_dir="$output_dir/$IMAGE_SUBDIR"
+ (
+ IFS="${BOOTLOADER_BLOBS_SEPARATOR-:}"
+ for blob in $BOOTLOADER_BLOBS; do
+ local blob_path="$ROOTDIR/$blob"
+ if [ ! -e "$blob_path" ]; then
+ warn Bootloader blob "$blob" not found, ignoring
+ continue
+ fi
+ install -D -m644 "$blob_path" "$image_dir/$(basename "$blob_path")"
+ done
+ )
+}
+
+# Determine a basename for our directory: the same as our tarball with
+# extensions removed. This is needed since tarball packages usually
+# have a base directory containing their contents, rather than extracting
+# into the current directory.
+output_dir="$(basename "$OUTPUT_TAR")"
+for ext in .xz .bz2 .gzip .gz .tgz .tar; do
+ output_dir="${output_dir%$ext}"
+done
+
+info Installing scripts
+install_scripts "$td/$output_dir"
+
+info Installing bootloader blobs
+install_bootloader_blobs "$td/$output_dir"
+
+info Writing rootfs tar to "$IMAGE_SUBDIR/$ROOTFS_TAR"
+tar -C "$ROOTDIR" -c . \
+| sh -c "${ROOTFS_COMPRESS-cat}" >"$td/$output_dir/$IMAGE_SUBDIR/$ROOTFS_TAR"
+
+info Writing image package tar to "$OUTPUT_TAR"
+tar -C "$td" -c "$output_dir" | sh -c "${OUTPUT_COMPRESS-cat}" >"$OUTPUT_TAR"
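A minimal usage sketch, with hypothetical paths (the MBR blob location depends on where syslinux lives inside the built rootfs): this produces a gzip-compressed package containing the rootfs tarball, the blob and the filled-in example scripts:

    INCLUDE_SCRIPTS=extensions/image-package-example/common.sh.in:extensions/image-package-example/disk-install.sh.in:extensions/image-package-example/make-disk-image.sh.in \
    BOOTLOADER_BLOBS=usr/share/syslinux/mbr.bin \
    OUTPUT_COMPRESS=gzip \
        sh extensions/image-package.write /mnt/built-rootfs image-package.tar.gz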
diff --git a/extensions/initramfs.write b/extensions/initramfs.write
new file mode 100755
index 00000000..1059defa
--- /dev/null
+++ b/extensions/initramfs.write
@@ -0,0 +1,26 @@
+#!/bin/sh
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# =*= License: GPL-2 =*=
+
+set -e
+
+ROOTDIR="$1"
+INITRAMFS_PATH="$2"
+
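+# Pack the root filesystem as a gzip-compressed cpio archive in the "newc"
+# format, which is the format the kernel expects for an initramfs image.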
+(cd "$ROOTDIR" &&
+ find . -print0 |
+ cpio -0 -H newc -o) |
+ gzip -c | install -D -m644 /dev/stdin "$INITRAMFS_PATH"
diff --git a/extensions/initramfs.write.help b/extensions/initramfs.write.help
new file mode 100644
index 00000000..54d3ae8c
--- /dev/null
+++ b/extensions/initramfs.write.help
@@ -0,0 +1,55 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+ Create an initramfs for a system by taking an existing system and
+ converting it to the appropriate format.
+
+ The system must have a `/init` executable as the userland entry-point.
+ This can have a different path, if `rdinit=$path` is added to
+ the kernel command line. This can be added to the `rawdisk`,
+ `virtualbox-ssh` and `kvm` write extensions with the `KERNEL_CMDLINE`
+ option.
+
+ It is possible to use a ramfs as the final rootfs without a `/init`
+ executable, by setting `root=/dev/mem`, or `rdinit=/sbin/init`,
+ but this is beyond the scope of the `initramfs.write` extension.
+
+ The intended use of initramfs.write is to be part of a nested
+ deployment, so the parent system has an initramfs stored as
+ `/boot/initramfs.gz`. See the following example:
+
+ name: initramfs-test
+ kind: cluster
+ systems:
+ - morph: minimal-system-x86_64-generic
+ deploy:
+ system:
+ type: rawdisk
+ location: initramfs-system-x86_64.img
+ DISK_SIZE: 1G
+ HOSTNAME: initramfs-system
+ INITRAMFS_PATH: boot/initramfs.gz
+ subsystems:
+ - morph: initramfs-x86_64
+ deploy:
+ initramfs:
+ type: initramfs
+ location: boot/initramfs.gz
+
+ Parameters:
+
+ * location: the path where the initramfs will be installed
+ (e.g. `boot/initramfs.gz` in the above example)
diff --git a/extensions/install-essential-files.configure b/extensions/install-essential-files.configure
new file mode 100755
index 00000000..2779b0d4
--- /dev/null
+++ b/extensions/install-essential-files.configure
@@ -0,0 +1,42 @@
+#!/usr/bin/env python2
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+''' A Morph configuration extension for adding essential files to a system
+
+It will read the manifest file located at essential-files/manifest,
+then use the contents of that file to determine which files
+to install into the target system.
+
+'''
+
+import subprocess
+import os
+
+import cliapp
+
+class InstallEssentialFilesConfigureExtension(cliapp.Application):
+
+ def process_args(self, args):
+ target_root = args[0]
+ os.environ["INSTALL_FILES"] = "essential-files/manifest"
+ self.install_essential_files(target_root)
+
+ def install_essential_files(self, target_root):
+ command = os.path.join(os.path.dirname(__file__),
+ "install-files.configure")
+ subprocess.check_call([command, target_root])
+
+InstallEssentialFilesConfigureExtension().run()
diff --git a/extensions/install-essential-files.configure.help b/extensions/install-essential-files.configure.help
new file mode 100644
index 00000000..9148aeff
--- /dev/null
+++ b/extensions/install-essential-files.configure.help
@@ -0,0 +1,20 @@
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+ This installs files from the essential-files/ folder in your
+ definitions.git repo, according to essential-files/manifest.
+
+ It wraps the install-files.configure extension. Take a look at that
+ extension's help to learn more about the format of the manifest file.
diff --git a/extensions/install-files.configure b/extensions/install-files.configure
new file mode 100755
index 00000000..341cce61
--- /dev/null
+++ b/extensions/install-files.configure
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# Copyright (C) 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+''' A Morph configuration extension for adding arbitrary files to a system
+
+It will read the manifest files specified in the environment variable
+INSTALL_FILES, then use the contents of those files to determine which files
+to install into the target system.
+
+'''
+
+import cliapp
+import os
+import errno
+import re
+import sys
+import shlex
+import shutil
+import stat
+
+try:
+ import jinja2
+ jinja_available = True
+except ImportError:
+ jinja_available = False
+
+class InstallFilesConfigureExtension(cliapp.Application):
+
+ def process_args(self, args):
+ if 'INSTALL_FILES' not in os.environ:
+ return
+ target_root = args[0]
+ manifests = shlex.split(os.environ['INSTALL_FILES'])
+ for manifest in manifests:
+ self.install_manifest(manifest, target_root)
+
+ def install_manifest(self, manifest, target_root):
+ manifest_dir = os.path.dirname(manifest)
+ with open(manifest) as f:
+ entries = f.readlines()
+ for entry in entries:
+ self.install_entry(entry, manifest_dir, target_root)
+
+ def force_symlink(self, source, link_name):
+ try:
+ os.symlink(source, link_name)
+ except OSError as e:
+ if e.errno == errno.EEXIST:
+ os.remove(link_name)
+ os.symlink(source, link_name)
+
+ def install_entry(self, entry, manifest_root, target_root):
+ m = re.match('(template )?(overwrite )?'
+ '([0-7]+) ([0-9]+) ([0-9]+) (\S+)', entry)
+
+ if m:
+ template = m.group(1)
+ overwrite = m.group(2)
+ mode = int(m.group(3), 8) # mode is octal
+ uid = int(m.group(4))
+ gid = int(m.group(5))
+ path = m.group(6)
+ else:
+ raise cliapp.AppException('Invalid manifest entry, '
+ 'format: [template] [overwrite] '
+ '<octal mode> <uid decimal> <gid decimal> <filename>')
+
+ dest_path = os.path.join(target_root, './' + path)
+ if stat.S_ISDIR(mode):
+ if os.path.exists(dest_path) and not overwrite:
+ dest_stat = os.stat(dest_path)
+ if (mode != dest_stat.st_mode
+ or uid != dest_stat.st_uid
+ or gid != dest_stat.st_gid):
+ raise cliapp.AppException('"%s" exists and is not '
+ 'identical to directory '
+ '"%s"' % (dest_path, entry))
+ else:
+ os.mkdir(dest_path, mode)
+ os.chown(dest_path, uid, gid)
+ os.chmod(dest_path, mode)
+
+ elif stat.S_ISLNK(mode):
+ if os.path.lexists(dest_path) and not overwrite:
+ raise cliapp.AppException('Symlink already exists at %s'
+ % dest_path)
+ else:
+ linkdest = os.readlink(os.path.join(manifest_root,
+ './' + path))
+ self.force_symlink(linkdest, dest_path)
+ os.lchown(dest_path, uid, gid)
+
+ elif stat.S_ISREG(mode):
+ if os.path.lexists(dest_path) and not overwrite:
+ raise cliapp.AppException('File already exists at %s'
+ % dest_path)
+ else:
+ if template:
+ if not jinja_available:
+ raise cliapp.AppException(
+ "Failed to install template file `%s': "
+ 'install-files templates require jinja2'
+ % path)
+
+ loader = jinja2.FileSystemLoader(manifest_root)
+ env = jinja2.Environment(loader=loader,
+ keep_trailing_newline=True)
+
+ env.get_template(path).stream(os.environ).dump(dest_path)
+ else:
+ shutil.copyfile(os.path.join(manifest_root, './' + path),
+ dest_path)
+
+ os.chown(dest_path, uid, gid)
+ os.chmod(dest_path, mode)
+
+ else:
+ raise cliapp.AppException('Mode given in "%s" is not a file,'
+ ' symlink or directory' % entry)
+
+InstallFilesConfigureExtension().run()
diff --git a/extensions/install-files.configure.help b/extensions/install-files.configure.help
new file mode 100644
index 00000000..991c26c8
--- /dev/null
+++ b/extensions/install-files.configure.help
@@ -0,0 +1,74 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+ Install a set of files onto a system
+
+ To use this extension you create a directory of files you want to install
+ onto the target system.
+
+ In this example we want to copy some ssh keys onto a system
+
+ % mkdir sshkeyfiles
+ % mkdir -p sshkeyfiles/root/.ssh
+ % cp id_rsa sshkeyfiles/root/.ssh
+ % cp id_rsa.pub sshkeyfiles/root/.ssh
+
+ Now we need to create a manifest file to set the file modes
+ and ownership. The manifest file should be created inside the
+ directory that contains the files we're trying to install.
+
+ cat << EOF > sshkeyfiles/manifest
+ 0040755 0 0 /root/.ssh
+ 0100600 0 0 /root/.ssh/id_rsa
+ 0100644 0 0 /root/.ssh/id_rsa.pub
+ EOF
+
+ Then we add the path to our manifest to our cluster morph;
+ this path should be relative to the system definitions repository.
+
+ INSTALL_FILES: sshkeyfiles/manifest
+
+ More generally, entries in the manifest are formatted as:
+ [template] [overwrite] <octal mode> <uid decimal> <gid decimal> <filename>
+
+ NOTE: Directories on the target must be listed in the manifest if they
+ do not already exist, so that they are created before files are
+ installed into them.
+
+ The extension supports files, symlinks and directories.
+
+ For example,
+
+ 0100644 0 0 /etc/issue
+
+ creates a regular file at /etc/issue with 644 permissions,
+ uid 0 and gid 0, if the file doesn't already exist.
+
+ overwrite 0100644 0 0 /etc/issue
+
+ creates a regular file at /etc/issue with 644 permissions,
+ uid 0 and gid 0; if the file already exists, it is overwritten.
+
+ 0100755 0 0 /usr/bin/foo
+
+ creates an executable file at /usr/bin/foo.
+
+ 0040755 0 0 /etc/foodir
+
+ creates a directory with 755 permissions.
+
+ 0120000 0 0 /usr/bin/bar
+
+ creates a symlink at /usr/bin/bar.
+
+ NOTE: You will still need to make a symlink in the manifest directory.
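+
+ NOTE: entries prefixed with the `template` keyword are rendered through
+ jinja2 before installation, with values substituted from the deployment
+ environment; this requires jinja2 to be available to the extension.
+
+ For reference, the leading digits of the octal mode encode the file type
+ (0100000 regular file, 0040000 directory, 0120000 symlink), which is how
+ the extension decides whether to create a file, a directory or a symlink.
+ So 0100644 is a regular file with permissions 644, and 0040755 is a
+ directory with permissions 755.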
diff --git a/extensions/installer.configure b/extensions/installer.configure
new file mode 100755
index 00000000..a77dc851
--- /dev/null
+++ b/extensions/installer.configure
@@ -0,0 +1,48 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configuration extension to configure an installer
+# system. It will create the configuration needed in the installer system
+# to perform an installation. It uses the following variables from the
+# environment:
+#
+# * INSTALLER_TARGET_STORAGE_DEVICE
+# * INSTALLER_ROOTFS_TO_INSTALL
+# * INSTALLER_POST_INSTALL_COMMAND (optional, defaults to `reboot -f`)
+
+import os
+import sys
+import yaml
+
+install_config_file = os.path.join(sys.argv[1], 'etc', 'install.conf')
+
+try:
+ installer_configuration = {
+ 'INSTALLER_TARGET_STORAGE_DEVICE': os.environ['INSTALLER_TARGET_STORAGE_DEVICE'],
+ 'INSTALLER_ROOTFS_TO_INSTALL': os.environ['INSTALLER_ROOTFS_TO_INSTALL'],
+ }
+except KeyError as e:
+ print "Not configuring as an installer system"
+ sys.exit(0)
+
+postinstkey = 'INSTALLER_POST_INSTALL_COMMAND'
+installer_configuration[postinstkey] = os.environ.get(postinstkey, 'reboot -f')
+
+with open(install_config_file, 'w') as f:
+ f.write(yaml.dump(installer_configuration, default_flow_style=False))
+
+print "Configuration of the installer system written to %s" % install_config_file
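+
+# Illustration only (the device and rootfs values below are placeholders):
+# with INSTALLER_TARGET_STORAGE_DEVICE=/dev/sda and
+# INSTALLER_ROOTFS_TO_INSTALL=/rootfs, the generated /etc/install.conf
+# would contain:
+#
+#   INSTALLER_POST_INSTALL_COMMAND: reboot -f
+#   INSTALLER_ROOTFS_TO_INSTALL: /rootfs
+#   INSTALLER_TARGET_STORAGE_DEVICE: /dev/sda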
diff --git a/extensions/jffs2.write b/extensions/jffs2.write
new file mode 100644
index 00000000..46b69a53
--- /dev/null
+++ b/extensions/jffs2.write
@@ -0,0 +1,64 @@
+#!/usr/bin/python
+#-*- coding: utf-8 -*-
+# Copyright © 2015 Codethink Limited
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''A Morph deployment write extension for creating images with jffs2
+ as the root filesystem.'''
+
+
+import cliapp
+import os
+
+import morphlib.writeexts
+
+
+class Jffs2WriteExtension(morphlib.writeexts.WriteExtension):
+
+ '''See jffs2.write.help for documentation.'''
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ temp_root, location = args
+
+ try:
+ self.create_jffs2_system(temp_root, location)
+ self.status(msg='Disk image has been created at %(location)s',
+ location = location)
+ except Exception:
+ self.status(msg='Failure to deploy system to %(location)s',
+ location = location)
+ raise
+
+ def create_jffs2_system(self, temp_root, location):
+ erase_block = self.get_erase_block_size()
+ cliapp.runcmd(
+ ['mkfs.jffs2', '--pad', '--no-cleanmarkers',
+ '--eraseblock='+erase_block, '-d', temp_root, '-o', location])
+
+ def get_erase_block_size(self):
+ erase_block = os.environ.get('ERASE_BLOCK', '')
+
+ if erase_block == '':
+ raise cliapp.AppException('ERASE_BLOCK was not given')
+
+ if not erase_block.isdigit():
+ raise cliapp.AppException('ERASE_BLOCK must be a whole number')
+
+ return erase_block
+
+Jffs2WriteExtension().run()
diff --git a/extensions/jffs2.write.help b/extensions/jffs2.write.help
new file mode 100644
index 00000000..059a354b
--- /dev/null
+++ b/extensions/jffs2.write.help
@@ -0,0 +1,28 @@
+#-*- coding: utf-8 -*-
+# Copyright © 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+ Takes a system produced by a Morph build, creates a jffs2 filesystem from
+ it, and writes that filesystem to an image. To use this extension, the
+ host system must have access to mkfs.jffs2, which is provided in the
+ mtd-utilities.morph stratum.
+
+ Parameters:
+
+ * location: the pathname of the disk image to be created/upgraded, or the
+ path to the physical device.
+
+ * ERASE_BLOCK: the erase block size of the target system, which can be
+ found in '/sys/class/mtd/mtdx/erasesize'
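+
+ For example, to read the value for the first MTD device (mtd0 here is
+ illustrative; use the MTD device that matches your flash):
+
+     cat /sys/class/mtd/mtd0/erasesize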
diff --git a/extensions/kvm.check b/extensions/kvm.check
new file mode 100755
index 00000000..67cb3d38
--- /dev/null
+++ b/extensions/kvm.check
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''Preparatory checks for Morph 'kvm' write extension'''
+
+import cliapp
+import os
+import re
+import urlparse
+
+import morphlib.writeexts
+
+
+class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension):
+
+ location_pattern = '^/(?P<guest>[^/]+)(?P<path>/.+)$'
+
+ def process_args(self, args):
+ if len(args) != 1:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ self.require_btrfs_in_deployment_host_kernel()
+
+ upgrade = self.get_environment_boolean('UPGRADE')
+ if upgrade:
+ raise cliapp.AppException(
+ 'Use the `ssh-rsync` write extension to deploy upgrades to an '
+ 'existing remote system.')
+
+ location = args[0]
+ ssh_host, vm_name, vm_path = self.check_and_parse_location(location)
+
+ self.check_ssh_connectivity(ssh_host)
+ self.check_can_create_file_at_given_path(ssh_host, vm_path)
+ self.check_no_existing_libvirt_vm(ssh_host, vm_name)
+ self.check_extra_disks_exist(ssh_host, self.parse_attach_disks())
+ self.check_virtual_networks_are_started(ssh_host)
+ self.check_host_has_virtinstall(ssh_host)
+
+ def check_and_parse_location(self, location):
+ '''Check and parse the location argument to get relevant data.'''
+
+ x = urlparse.urlparse(location)
+
+ if x.scheme != 'kvm+ssh':
+ raise cliapp.AppException(
+ 'URL schema must be kvm+ssh in %s' % location)
+
+ m = re.match(self.location_pattern, x.path)
+ if not m:
+ raise cliapp.AppException('Cannot parse location %s' % location)
+
+ return x.netloc, m.group('guest'), m.group('path')
+
+ def check_no_existing_libvirt_vm(self, ssh_host, vm_name):
+ try:
+ cliapp.ssh_runcmd(ssh_host,
+ ['virsh', '--connect', 'qemu:///system', 'domstate', vm_name])
+ except cliapp.AppException as e:
+ pass
+ else:
+ raise cliapp.AppException(
+ 'Host %s already has a VM named %s. You can use the ssh-rsync '
+ 'write extension to deploy upgrades to existing machines.' %
+ (ssh_host, vm_name))
+
+ def check_can_create_file_at_given_path(self, ssh_host, vm_path):
+
+ def check_can_write_to_given_path():
+ try:
+ cliapp.ssh_runcmd(ssh_host, ['touch', vm_path])
+ except cliapp.AppException as e:
+ raise cliapp.AppException("Can't write to location %s on %s"
+ % (vm_path, ssh_host))
+ else:
+ cliapp.ssh_runcmd(ssh_host, ['rm', vm_path])
+
+ try:
+ cliapp.ssh_runcmd(ssh_host, ['test', '-e', vm_path])
+ except cliapp.AppException as e:
+ # vm_path doesn't already exist, so let's test we can write
+ check_can_write_to_given_path()
+ else:
+ raise cliapp.AppException('%s already exists on %s'
+ % (vm_path, ssh_host))
+
+ def check_extra_disks_exist(self, ssh_host, filename_list):
+ for filename in filename_list:
+ try:
+ cliapp.ssh_runcmd(ssh_host, ['ls', filename])
+ except cliapp.AppException as e:
+ raise cliapp.AppException('Did not find file %s on host %s' %
+ (filename, ssh_host))
+
+ def check_virtual_networks_are_started(self, ssh_host):
+
+ def check_virtual_network_is_started(network_name):
+ cmd = ['virsh', '-c', 'qemu:///system', 'net-info', network_name]
+ net_info = cliapp.ssh_runcmd(ssh_host, cmd).split('\n')
+
+ def pretty_concat(lines):
+ return '\n'.join(['\t%s' % line for line in lines])
+
+ for line in net_info:
+ m = re.match('^Active:\W*(\w+)\W*', line)
+ if m:
+ break
+ else:
+ raise cliapp.AppException(
+ "Got unexpected output parsing output of `%s':\n%s"
+ % (' '.join(cmd), pretty_concat(net_info)))
+
+ network_active = m.group(1) == 'yes'
+
+ if not network_active:
+ raise cliapp.AppException("Network '%s' is not started"
+ % network_name)
+
+ def name(nic_entry):
+     if ',' in nic_entry:
+         # network=NETWORK_NAME,mac=12:34,model=e1000...
+         nic_entry = nic_entry[:nic_entry.find(',')]
+     # Strip the 'network=' prefix explicitly: str.lstrip() strips a set
+     # of characters, not a prefix, and would mangle some network names.
+     if nic_entry.startswith('network='):
+         nic_entry = nic_entry[len('network='):]
+     return nic_entry
+
+ if 'NIC_CONFIG' in os.environ:
+ nics = os.environ['NIC_CONFIG'].split()
+
+ for n in nics:
+ if not (n.startswith('network=')
+ or n.startswith('bridge=')
+ or n == 'user'):
+ raise cliapp.AppException('malformed NIC_CONFIG: %s\n'
+ " (expected 'bridge=BRIDGE' 'network=NAME'"
+ " or 'user')" % n)
+
+ # --network bridge= is used to specify a bridge
+ # --network user is used to specify a form of NAT
+ # (see the virt-install(1) man page)
+ networks = [name(n) for n in nics if not n.startswith('bridge=')
+ and not n.startswith('user')]
+ else:
+ networks = ['default']
+
+ for network in networks:
+ check_virtual_network_is_started(network)
+
+ def check_host_has_virtinstall(self, ssh_host):
+ try:
+ cliapp.ssh_runcmd(ssh_host, ['which', 'virt-install'])
+ except cliapp.AppException:
+ raise cliapp.AppException(
+ 'virt-install does not seem to be installed on host %s'
+ % ssh_host)
+
+
+KvmPlusSshCheckExtension().run()
diff --git a/extensions/kvm.write b/extensions/kvm.write
new file mode 100755
index 00000000..0d0c095b
--- /dev/null
+++ b/extensions/kvm.write
@@ -0,0 +1,120 @@
+#!/usr/bin/python
+# Copyright (C) 2012-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''A Morph deployment write extension for deploying to KVM+libvirt.
+
+See file kvm.write.help for documentation
+
+'''
+
+
+import cliapp
+import os
+import re
+import sys
+import tempfile
+import urlparse
+
+import morphlib.writeexts
+
+
+class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension):
+
+ location_pattern = '^/(?P<guest>[^/]+)(?P<path>/.+)$'
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ temp_root, location = args
+ ssh_host, vm_name, vm_path = self.parse_location(location)
+ autostart = self.get_environment_boolean('AUTOSTART')
+
+ fd, raw_disk = tempfile.mkstemp()
+ os.close(fd)
+ self.create_local_system(temp_root, raw_disk)
+
+ try:
+ self.transfer(raw_disk, ssh_host, vm_path)
+ self.create_libvirt_guest(ssh_host, vm_name, vm_path, autostart)
+ except BaseException:
+ sys.stderr.write('Error deploying to libvirt')
+ os.remove(raw_disk)
+ cliapp.ssh_runcmd(ssh_host, ['rm', '-f', vm_path])
+ raise
+ else:
+ os.remove(raw_disk)
+
+ self.status(
+ msg='Virtual machine %(vm_name)s has been created',
+ vm_name=vm_name)
+
+ def parse_location(self, location):
+ '''Parse the location argument to get relevant data.'''
+
+ x = urlparse.urlparse(location)
+ m = re.match(self.location_pattern, x.path)
+ return x.netloc, m.group('guest'), m.group('path')
+
+ def transfer(self, raw_disk, ssh_host, vm_path):
+ '''Transfer raw disk image to libvirt host.'''
+
+ self.status(msg='Transferring disk image')
+
+ xfer_hole_path = morphlib.util.get_data_path('xfer-hole')
+ recv_hole = morphlib.util.get_data('recv-hole')
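+ # xfer-hole streams the raw disk image preserving its sparse holes, and
+ # recv-hole recreates them on the remote host, so unallocated space is
+ # not sent over the ssh connection.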
+
+ ssh_remote_cmd = [
+ 'sh', '-c', recv_hole, 'dummy-argv0', 'file', vm_path
+ ]
+
+ cliapp.runcmd(
+ ['python', xfer_hole_path, raw_disk],
+ ['ssh', ssh_host] + map(cliapp.shell_quote, ssh_remote_cmd),
+ stdout=None, stderr=None)
+
+ def create_libvirt_guest(self, ssh_host, vm_name, vm_path, autostart):
+ '''Create the libvirt virtual machine.'''
+
+ self.status(msg='Creating libvirt/kvm virtual machine')
+
+ attach_disks = self.parse_attach_disks()
+ attach_opts = []
+ for disk in attach_disks:
+ attach_opts.extend(['--disk', 'path=%s' % disk])
+
+ if 'NIC_CONFIG' in os.environ:
+ nics = os.environ['NIC_CONFIG'].split()
+ for nic in nics:
+ attach_opts.extend(['--network', nic])
+
+ ram_mebibytes = str(self.get_ram_size() / (1024**2))
+
+ vcpu_count = str(self.get_vcpu_count())
+
+ cmdline = ['virt-install', '--connect', 'qemu:///system',
+ '--import', '--name', vm_name, '--vnc',
+ '--ram', ram_mebibytes, '--vcpus', vcpu_count,
+ '--disk', 'path=%s,bus=ide' % vm_path] + attach_opts
+ if not autostart:
+ cmdline += ['--noreboot']
+ cliapp.ssh_runcmd(ssh_host, cmdline)
+
+ if autostart:
+ cliapp.ssh_runcmd(ssh_host,
+ ['virsh', '--connect', 'qemu:///system', 'autostart', vm_name])
+
+KvmPlusSshWriteExtension().run()
diff --git a/extensions/kvm.write.help b/extensions/kvm.write.help
new file mode 100644
index 00000000..812a5309
--- /dev/null
+++ b/extensions/kvm.write.help
@@ -0,0 +1,90 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+ Deploy a Baserock system as a *new* KVM/LibVirt virtual machine.
+
+ Use the `ssh-rsync` write extension to deploy upgrades to an *existing* VM.
+
+ Parameters:
+
+ * location: a custom URL scheme of the form `kvm+ssh://HOST/GUEST/PATH`,
+ where:
+ * HOST is the name of the host on which KVM/LibVirt is running
+ * GUEST is the name of the guest VM on that host
+ * PATH is the path to the disk image that should be created,
+ on that host. For example,
+ `kvm+ssh://alice@192.168.122.1/testsys/home/alice/testys.img` where
+ * `alice@192.168.122.1` is the target host as given to ssh,
+ **from within the development host** (which may be
+ different from the target host's normal address);
+ * `testsys` is the name of the new guest VM;
+ * `/home/alice/testys.img` is the pathname of the disk image file
+ on the target host.
+
+ * HOSTNAME=name: the hostname of the **guest** VM within the network into
+ which it is being deployed.
+
+ * DISK_SIZE=X: the size of the VM's primary virtual hard disk. `X` should
+ use a suffix of `K`, `M`, or `G` (in upper or lower case) to indicate
+ kilo-, mega-, or gigabytes. For example, `DISK_SIZE=100G` would create a
+ 100 gigabyte disk image. **This parameter is mandatory**.
+
+ * RAM_SIZE=X: The amount of RAM that the virtual machine should allocate
+ for itself from the host. `X` is interpreted in the same way as for
+ `DISK_SIZE`, and defaults to `1G`.
+
+ * VCPUS=n: the number of virtual CPUs for the VM. Allowed values are 1-32.
+ Do not use more CPU cores than you have available physically (real cores,
+ not hyperthreads).
+
+ * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to
+ tell Linux to use, rather than booting the rootfs directly.
+
+ * AUTOSTART=<VALUE> - boolean. If it is set, the VM will be started when
+ it has been deployed.
+
+ * DTB_PATH=path: **(MANDATORY)** for systems that require a device tree
+ binary. Give the full path (without a leading /) to the location of the
+ DTB in the built system image. The deployment will fail if `path` does
+ not exist.
+
+ * BOOTLOADER_INSTALL=value: the bootloader to be installed
+ **(MANDATORY)** for non-x86 systems
+
+ allowed values =
+ - 'extlinux' (default) - the extlinux bootloader will
+ be installed
+ - 'none' - no bootloader will be installed by `morph deploy`. A
+ bootloader must be installed manually. This value must be used when
+ deploying non-x86 systems such as ARM.
+
+ * BOOTLOADER_CONFIG_FORMAT=value: the bootloader format to be used.
+ If not specified for x86-32 and x86-64 systems, 'extlinux' will be used
+
+ allowed values =
+ - 'extlinux'
+
+ * KERNEL_ARGS=args: optional additional kernel command-line parameters to
+ be appended to the default set. The default set is:
+
+ 'rw init=/sbin/init rootfstype=btrfs \
+ rootflags=subvol=systems/default/run \
+ root=[name or UUID of root filesystem]'
+
+ (See https://www.kernel.org/doc/Documentation/kernel-parameters.txt)
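+
+ * NIC_CONFIG=entries: optional, space-separated list of network interfaces
+ to give the VM. Each entry is passed to virt-install as a `--network`
+ argument; the kvm.check extension accepts entries of the form
+ `network=NAME`, `bridge=BRIDGE` or `user`.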
+
+ (See `morph help deploy` for details of how to pass parameters to write
+ extensions)
diff --git a/extensions/mason.configure b/extensions/mason.configure
new file mode 100644
index 00000000..40fdfe46
--- /dev/null
+++ b/extensions/mason.configure
@@ -0,0 +1,153 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configuration extension to fully configure
+# a Mason instance at deployment time. It uses the following variables
+# from the environment:
+#
+# * ARTIFACT_CACHE_SERVER
+# * MASON_CLUSTER_MORPHOLOGY
+# * MASON_DEFINITIONS_REF
+# * MASON_DISTBUILD_ARCH
+# * MASON_TEST_HOST
+# * OPENSTACK_NETWORK_ID
+# * TEST_INFRASTRUCTURE_TYPE
+# * TROVE_HOST
+# * TROVE_ID
+# * CONTROLLERHOST
+
+set -e
+
+##########################################################################
+# Copy Mason files into root filesystem
+##########################################################################
+
+ROOT="$1"
+
+mkdir -p "$ROOT"/usr/lib/mason
+cp extensions/mason/mason.sh "$ROOT"/usr/lib/mason/mason.sh
+cp extensions/mason/mason-report.sh "$ROOT"/usr/lib/mason/mason-report.sh
+cp extensions/mason/os-init-script "$ROOT"/usr/lib/mason/os-init-script
+
+cp extensions/mason/mason.timer "$ROOT"/etc/systemd/system/mason.timer
+
+cp extensions/mason/mason.service "$ROOT"/etc/systemd/system/mason.service
+
+##########################################################################
+# Set up httpd web server
+##########################################################################
+
+cp extensions/mason/httpd.service "$ROOT"/etc/systemd/system/httpd.service
+
+mkdir -p "$ROOT"/srv/mason
+
+cat >>"$ROOT"/etc/httpd.conf <<EOF
+.log:text/plain
+EOF
+
+mkdir -p "$ROOT"/var/mason
+
+##########################################################################
+# Copy files needed for Ansible configuration
+##########################################################################
+
+mkdir -p "$ROOT/usr/share/mason-setup"
+mkdir -p "$ROOT/usr/lib/mason-setup"
+
+cp extensions/mason/share/* "$ROOT/usr/share/mason-setup"
+cp -r extensions/mason/ansible "$ROOT/usr/lib/mason-setup/"
+cp extensions/mason/mason-setup.service "$ROOT"/etc/systemd/system/mason-setup.service
+
+ln -s ../mason-setup.service "$ROOT"/etc/systemd/system/multi-user.target.wants/mason-setup.service
+
+##########################################################################
+# Check variables
+##########################################################################
+
+if [ -n "$MASON_GENERIC" ]; then
+ echo Not configuring Mason, it will be generic
+ exit 0
+fi
+
+if [ -z "$MASON_CLUSTER_MORPHOLOGY" -a \
+ -z "$MASON_DEFINITIONS_REF" -a \
+ -z "$MASON_DISTBUILD_ARCH" -a \
+ -z "$MASON_TEST_HOST" ]; then
+ # No Mason options defined, do nothing.
+ exit 0
+fi
+
+if [ -z "$ARTIFACT_CACHE_SERVER" -o \
+ -z "$CONTROLLERHOST" -o \
+ -z "$MASON_CLUSTER_MORPHOLOGY" -o \
+ -z "$MASON_DEFINITIONS_REF" -o \
+ -z "$MASON_DISTBUILD_ARCH" -o \
+ -z "$MASON_TEST_HOST" -o \
+ -z "$TROVE_HOST" -o \
+ -z "$TROVE_ID" ]; then
+ echo Some options required for Mason were defined, but not all.
+ exit 1
+fi
+
+##########################################################################
+# Generate config variable shell snippet
+##########################################################################
+
+MASON_DATA="$ROOT/etc/mason"
+mkdir -p "$MASON_DATA"
+
+python <<'EOF' >"$MASON_DATA/mason.conf"
+import os, sys, yaml
+
+mason_configuration={
+ 'ARTIFACT_CACHE_SERVER': os.environ['ARTIFACT_CACHE_SERVER'],
+ 'MASON_CLUSTER_MORPHOLOGY': os.environ['MASON_CLUSTER_MORPHOLOGY'],
+ 'MASON_DEFINITIONS_REF': os.environ['MASON_DEFINITIONS_REF'],
+ 'MASON_DISTBUILD_ARCH': os.environ['MASON_DISTBUILD_ARCH'],
+ 'MASON_TEST_HOST': os.environ['MASON_TEST_HOST'],
+ 'OPENSTACK_NETWORK_ID': os.environ['OPENSTACK_NETWORK_ID'],
+ 'TEST_INFRASTRUCTURE_TYPE': os.environ['TEST_INFRASTRUCTURE_TYPE'],
+ 'TROVE_ID': os.environ['TROVE_ID'],
+ 'TROVE_HOST': os.environ['TROVE_HOST'],
+ 'CONTROLLERHOST': os.environ['CONTROLLERHOST'],
+}
+
+yaml.dump(mason_configuration, sys.stdout, default_flow_style=False)
+EOF
+
+if [ "$TEST_INFRASTRUCTURE_TYPE" = "openstack" ]; then
+ python <<'EOF' >>"$MASON_DATA/mason.conf"
+import os, sys, yaml
+
+openstack_credentials={
+ 'OS_USERNAME': os.environ['OPENSTACK_USER'],
+ 'OS_TENANT_NAME': os.environ['OPENSTACK_TENANT'],
+ 'OS_TENANT_ID': os.environ['OPENSTACK_TENANT_ID'],
+ 'OS_AUTH_URL': os.environ['OPENSTACK_AUTH_URL'],
+ 'OS_PASSWORD': os.environ['OPENSTACK_PASSWORD'],
+}
+
+yaml.dump(openstack_credentials, sys.stdout, default_flow_style=False)
+EOF
+fi
+
+##########################################################################
+# Enable services
+##########################################################################
+
+ln -s ../mason.timer "$ROOT"/etc/systemd/system/multi-user.target.wants/mason.timer
+ln -s ../httpd.service "$ROOT"/etc/systemd/system/multi-user.target.wants/httpd.service
diff --git a/extensions/mason/ansible/hosts b/extensions/mason/ansible/hosts
new file mode 100644
index 00000000..5b97818d
--- /dev/null
+++ b/extensions/mason/ansible/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local
diff --git a/extensions/mason/ansible/mason-setup.yml b/extensions/mason/ansible/mason-setup.yml
new file mode 100644
index 00000000..d1528dbb
--- /dev/null
+++ b/extensions/mason/ansible/mason-setup.yml
@@ -0,0 +1,83 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/mason/mason.conf"
+ tasks:
+
+
+ - fail: msg='TROVE_ID is mandatory'
+ when: TROVE_ID is not defined
+
+ - fail: msg='TROVE_HOST is mandatory'
+ when: TROVE_HOST is not defined
+
+ - fail: msg='ARTIFACT_CACHE_SERVER is mandatory'
+ when: ARTIFACT_CACHE_SERVER is not defined
+
+ - fail: msg='MASON_CLUSTER_MORPHOLOGY is mandatory'
+ when: MASON_CLUSTER_MORPHOLOGY is not defined
+
+ - fail: msg='MASON_DEFINITIONS_REF is mandatory'
+ when: MASON_DEFINITIONS_REF is not defined
+
+ - fail: msg='MASON_DISTBUILD_ARCH is mandatory'
+ when: MASON_DISTBUILD_ARCH is not defined
+
+ - fail: msg='MASON_TEST_HOST is mandatory'
+ when: MASON_TEST_HOST is not defined
+
+ - fail: msg='CONTROLLERHOST is mandatory'
+ when: CONTROLLERHOST is not defined
+
+ - fail: msg='TEST_INFRASTRUCTURE_TYPE is mandatory'
+ when: TEST_INFRASTRUCTURE_TYPE is not defined
+
+ - fail: msg='OPENSTACK_NETWORK_ID is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OPENSTACK_NETWORK_ID is not defined
+
+ - fail: msg='OS_USERNAME is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_USERNAME is not defined
+
+ - fail: msg='OS_PASSWORD is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_PASSWORD is not defined
+
+ - fail: msg='OS_TENANT_ID is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_TENANT_ID is not defined
+
+ - fail: msg='OS_TENANT_NAME is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_TENANT_NAME is not defined
+
+ - fail: msg='OS_AUTH_URL is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_AUTH_URL is not defined
+
+ - name: Create the Mason configuration file
+ template: src=/usr/share/mason-setup/{{ item }} dest=/etc/{{ item }}
+ with_items:
+ - mason.conf
+
+ - name: Create the OpenStack credentials file
+ template: src=/usr/share/mason-setup/{{ item }} dest=/etc/{{ item }}
+ with_items:
+ - os.conf
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack"
+
+ - name: Enable the mason service
+ service: name=mason.service enabled=yes
+ register: mason_service
+ - name: Restart the mason service
+ service: name=mason.service state=restarted
+ when: mason_service|changed
+
+ - name: Enable the mason timer
+ service: name=mason.timer enabled=yes
+ register: mason_timer
+ - name: Restart the mason timer
+ service: name=mason.timer state=restarted
+ when: mason_timer|changed
+
+ - name: Enable the httpd service
+ service: name=httpd.service enabled=yes
+ register: httpd_service
+ - name: Restart the httpd service
+ service: name=httpd state=restarted
+ when: httpd_service|changed
diff --git a/extensions/mason/httpd.service b/extensions/mason/httpd.service
new file mode 100644
index 00000000..7572b732
--- /dev/null
+++ b/extensions/mason/httpd.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=HTTP server for Mason
+After=network.target
+
+[Service]
+User=root
+ExecStart=/usr/sbin/httpd -f -p 80 -h /srv/mason
+
+[Install]
+WantedBy=multi-user.target
diff --git a/extensions/mason/mason-generator.sh b/extensions/mason/mason-generator.sh
new file mode 100755
index 00000000..187db72c
--- /dev/null
+++ b/extensions/mason/mason-generator.sh
@@ -0,0 +1,101 @@
+#!/bin/sh
+
+set -e
+
+if [ "$#" -lt 5 -o "$#" -gt 6 -o "$1" == "-h" -o "$1" == "--help" ]; then
+ cat <<EOF
+Usage:
+ `basename $0` HOST_PREFIX UPSTREAM_TROVE_HOSTNAME VM_USER VM_HOST VM_PATH [HOST_POSTFIX]
+
+Where:
+ HOST_PREFIX -- Name of your Mason instance
+ e.g. "my-mason" to produce hostnames:
+ my-mason-trove and my-mason-controller
+ UPSTREAM_TROVE_HOSTNAME -- Upstream trove's hostname
+ VM_USER -- User on VM host for VM deployment
+ VM_HOST -- VM host for VM deployment
+ VM_PATH -- Path to store VM images in on VM host
+ HOST_POSTFIX -- e.g. ".example.com" to get
+ my-mason-trove.example.com
+
+This script makes deploying a Mason system simpler by automating
+the generation of keys for the systems to use, building of the
+systems, filling out the mason deployment cluster morphology
+template with useful values, and finally deploying the systems.
+
+To ensure that the deployed system can deploy test systems, you
+must supply an ssh key to the VM host. Do so with the following
+command:
+ ssh-copy-id -i ssh_keys-HOST_PREFIX/worker.key.pub VM_USER@VM_HOST
+
+To ensure that the mason can upload artifacts to the upstream trove,
+you must supply an ssh key to the upstream trove. Do so with the
+following command:
+ ssh-copy-id -i ssh_keys-HOST_PREFIX/id_rsa.key.pub root@UPSTREAM_TROVE_HOSTNAME
+
+EOF
+ exit 0
+fi
+
+
+HOST_PREFIX="$1"
+UPSTREAM_TROVE="$2"
+VM_USER="$3"
+VM_HOST="$4"
+VM_PATH="$5"
+HOST_POSTFIX="$6"
+
+sedescape() {
+ # Escape all non-alphanumeric characters
+ printf "%s\n" "$1" | sed -e 's/\W/\\&/g'
+}
+
+
+##############################################################################
+# Key generation
+##############################################################################
+
+mkdir -p "ssh_keys-${HOST_PREFIX}"
+cd "ssh_keys-${HOST_PREFIX}"
+test -e mason.key || ssh-keygen -t rsa -b 2048 -f mason.key -C mason@TROVE_HOST -N ''
+test -e lorry.key || ssh-keygen -t rsa -b 2048 -f lorry.key -C lorry@TROVE_HOST -N ''
+test -e worker.key || ssh-keygen -t rsa -b 2048 -f worker.key -C worker@TROVE_HOST -N ''
+test -e id_rsa || ssh-keygen -t rsa -b 2048 -f id_rsa -C trove-admin@TROVE_HOST -N ''
+cd ../
+
+
+##############################################################################
+# Mason setup
+##############################################################################
+
+cp clusters/mason.morph mason-${HOST_PREFIX}.morph
+
+sed -i "s/red-box-v1/$(sedescape "$HOST_PREFIX")/g" "mason-$HOST_PREFIX.morph"
+sed -i "s/ssh_keys/ssh_keys-$(sedescape "$HOST_PREFIX")/g" "mason-$HOST_PREFIX.morph"
+sed -i "s/upstream-trove/$(sedescape "$UPSTREAM_TROVE")/" "mason-$HOST_PREFIX.morph"
+sed -i "s/vm-user/$(sedescape "$VM_USER")/g" "mason-$HOST_PREFIX.morph"
+sed -i "s/vm-host/$(sedescape "$VM_HOST")/g" "mason-$HOST_PREFIX.morph"
+sed -i "s/vm-path/$(sedescape "$VM_PATH")/g" "mason-$HOST_PREFIX.morph"
+sed -i "s/\.example\.com/$(sedescape "$HOST_POSTFIX")/g" "mason-$HOST_PREFIX.morph"
+
+
+##############################################################################
+# System building
+##############################################################################
+
+morph build systems/trove-system-x86_64.morph
+morph build systems/build-system-x86_64.morph
+
+
+##############################################################################
+# System deployment
+##############################################################################
+
+morph deploy mason-${HOST_PREFIX}.morph
+
+
+##############################################################################
+# Cleanup
+##############################################################################
+
+rm mason-${HOST_PREFIX}.morph
diff --git a/extensions/mason/mason-report.sh b/extensions/mason/mason-report.sh
new file mode 100755
index 00000000..9c20b65b
--- /dev/null
+++ b/extensions/mason/mason-report.sh
@@ -0,0 +1,252 @@
+#!/bin/bash
+
+set -x
+
+. /etc/mason.conf
+
+REPORT_PATH=/var/mason/report.html
+SERVER_PATH=/srv/mason
+
+sed_escape() {
+ printf "%s\n" "$1" | sed -e 's/\W/\\&/g'
+}
+
+create_report() {
+cat > $REPORT_PATH <<'EOF'
+<html>
+<head>
+<meta charset="UTF-8">
+<meta http-equiv="refresh" content="60">
+<style>
+html, body {
+ margin: 0;
+ padding: 0;
+}
+p.branding {
+ background: black;
+ color: #fff;
+ padding: 0.4em;
+ margin: 0;
+ font-weight: bold;
+}
+h1 {
+ background: #225588;
+ color: white;
+ margin: 0;
+ padding: 0.6em;
+}
+table {
+ width: 90%;
+ margin: 1em auto 6em auto;
+ border: 1px solid black;
+ border-spacing: 0;
+}
+table tr.headings {
+ background: #555;
+ color: white;
+}
+table tr.pass {
+ background: #aaffaa;
+}
+table tr.pass:hover {
+ background: #bbffbb;
+}
+table tr.fail {
+ background: #ffaaaa;
+}
+table tr.fail:hover {
+ background: #ffbbbb;
+}
+table tr.nonet {
+ background: #ffdd99;
+}
+table tr.nonet:hover {
+ background: #ffeeaa;
+}
+table tr.headings th {
+ font-weight: bold;
+ text-align: left;
+ padding: 3px 2px;
+}
+table td {
+ padding: 2px;
+}
+td.result {
+ font-weight: bold;
+ text-transform: uppercase;
+}
+td.result a {
+ text-decoration: none;
+}
+td.result a:before {
+ content: "➫ ";
+}
+tr.pass td.result a {
+ color: #252;
+}
+tr.pass td.result a:hover {
+ color: #373;
+}
+tr.fail td.result a {
+ color: #622;
+}
+tr.fail td.result a:hover {
+ color: #933;
+}
+tr.nonet td.result a {
+ color: #641;
+}
+tr.nonet td.result a:hover {
+ color: #962;
+}
+td.ref {
+ font-family: monospace;
+}
+td.ref a {
+ color: #333;
+}
+td.ref a:hover {
+ color: #555;
+}
+table tr.pass td, table tr.fail td {
+ border-top: solid white 1px;
+}
+p {
+ margin: 1.3em;
+}
+code {
+ padding: 0.3em 0.5em;
+ background: #eee;
+ border: 1px solid #bbb;
+ border-radius: 1em;
+}
+#footer {
+ margin: 0;
+ background: #aaa;
+ color: #222;
+ border-top: #888 1px solid;
+ font-size: 80%;
+ padding: 0;
+ position: fixed;
+ bottom: 0;
+ width: 100%;
+ display: table;
+}
+#footer p {
+ padding: 1.3em;
+ display: table-cell;
+}
+#footer p code {
+ font-size: 110%;
+}
+#footer p.about {
+ text-align: right;
+}
+</style>
+</head>
+<body>
+<p class="branding">Mason</p>
+<h1>Baserock: Continuous Delivery</h1>
+<p>Build log of changes to <code>BRANCH</code> from <code>TROVE</code>. Most recent first.</p>
+<table>
+<tr class="headings">
+ <th>Started</th>
+ <th>Ref</th>
+ <th>Duration</th>
+ <th>Result</th>
+</tr>
+<!--INSERTION POINT-->
+</table>
+<div id="footer">
+<p>Last checked for updates at: <code>....-..-.. ..:..:..</code></p>
+<p class="about">Generated by Mason | Powered by Baserock</p>
+</div>
+</body>
+</html>
+EOF
+
+ sed -i 's/BRANCH/'"$(sed_escape "$1")"'/' $REPORT_PATH
+ sed -i 's/TROVE/'"$(sed_escape "$2")"'/' $REPORT_PATH
+}
+
+update_report() {
+ # Give function params sensible names
+ build_start_time="$1"
+ build_trove_host="$2"
+ build_ref="$3"
+ build_sha1="$4"
+ build_duration="$5"
+ build_result="$6"
+
+ # Generate template if report file is not there
+ if [ ! -f $REPORT_PATH ]; then
+ create_report $build_ref $build_trove_host
+ fi
+
+ # Build table row for insertion into report file
+ if [ "$build_result" = nonet ]; then
+ msg='<tr class="'"${build_result}"'"><td>'"${build_start_time}"'</td><td class="ref">Failed to contact '"${build_trove_host}"'</a></td><td>'"${build_duration}s"'</td><td class="result"><a href="log/'"${build_sha1}"'--'"${build_start_time}"'.log">'"${build_result}"'</a></td></tr>'
+ else
+ msg='<tr class="'"${build_result}"'"><td>'"${build_start_time}"'</td><td class="ref"><a href="http://'"${build_trove_host}"'/cgi-bin/cgit.cgi/baserock/baserock/definitions.git/commit/?h='"${build_ref}"'&id='"${build_sha1}"'">'"${build_sha1}"'</a></td><td>'"${build_duration}s"'</td><td class="result"><a href="log/'"${build_sha1}"'--'"${build_start_time}"'.log">'"${build_result}"'</a></td></tr>'
+ fi
+
+ # Insert report line, newest at top
+ sed -i 's/<!--INSERTION POINT-->/<!--INSERTION POINT-->\n'"$(sed_escape "$msg")"'/' $REPORT_PATH
+}
+
+update_report_time() {
+ # Give function params sensible names
+ build_start_time="$1"
+
+ # If the report file exists, update the last-checked-for-updates time
+ if [ -f $REPORT_PATH ]; then
+ sed -i 's/<code>....-..-.. ..:..:..<\/code>/<code>'"$(sed_escape "$build_start_time")"'<\/code>/' $REPORT_PATH
+ fi
+}
+
+START_TIME=`date +%Y-%m-%d\ %T`
+
+update_report_time "$START_TIME"
+cp "$REPORT_PATH" "$SERVER_PATH/index.html"
+
+logfile="$(mktemp)"
+/usr/lib/mason/mason.sh 2>&1 | tee "$logfile"
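+# mason.sh exits 33 when there is nothing new to build and 42 when the
+# trove cannot be contacted (see /usr/lib/mason/mason.sh); any other
+# non-zero status is treated as a build failure.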
+case "${PIPESTATUS[0]}" in
+0)
+ RESULT=pass
+ ;;
+33)
+ RESULT=skip
+ ;;
+42)
+ RESULT=nonet
+ ;;
+*)
+ RESULT=fail
+ ;;
+esac
+
+# TODO: Update page with last executed time
+if [ "$RESULT" = skip ]; then
+ rm "$logfile"
+ exit 0
+fi
+
+DURATION=$(( $(date +%s) - $(date --date="$START_TIME" +%s) ))
+SHA1="$(cd "ws/$DEFINITIONS_REF/$UPSTREAM_TROVE_ADDRESS/baserock/baserock/definitions" && git rev-parse HEAD)"
+
+update_report "$START_TIME" \
+ "$UPSTREAM_TROVE_ADDRESS" \
+ "$DEFINITIONS_REF" \
+ "$SHA1" \
+ "$DURATION" \
+ "$RESULT"
+
+
+#
+# Copy report into server directory
+#
+
+cp "$REPORT_PATH" "$SERVER_PATH/index.html"
+mkdir -p "$SERVER_PATH/log"
+mv "$logfile" "$SERVER_PATH/log/$SHA1--$START_TIME.log"
diff --git a/extensions/mason/mason-setup.service b/extensions/mason/mason-setup.service
new file mode 100644
index 00000000..60403bde
--- /dev/null
+++ b/extensions/mason/mason-setup.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Run mason-setup Ansible scripts
+Requires=network.target
+After=network.target
+Requires=opensshd.service
+After=opensshd.service
+
+# If there's a shared /var subvolume, it must be mounted before this
+# unit runs.
+Requires=local-fs.target
+After=local-fs.target
+
+ConditionPathExists=/etc/mason/mason.conf
+
+[Service]
+ExecStart=/usr/bin/ansible-playbook -v -i /usr/lib/mason-setup/ansible/hosts /usr/lib/mason-setup/ansible/mason-setup.yml
diff --git a/extensions/mason/mason.service b/extensions/mason/mason.service
new file mode 100644
index 00000000..d5c99498
--- /dev/null
+++ b/extensions/mason/mason.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Mason: Continuous Delivery Service
+After=mason-setup.service
+ConditionPathIsDirectory=/srv/distbuild
+
+[Service]
+User=root
+ExecStart=/usr/lib/mason/mason-report.sh
+WorkingDirectory=/srv/distbuild
+
+[Install]
+WantedBy=multi-user.target
diff --git a/extensions/mason/mason.sh b/extensions/mason/mason.sh
new file mode 100755
index 00000000..dba99dfa
--- /dev/null
+++ b/extensions/mason/mason.sh
@@ -0,0 +1,93 @@
+#!/bin/sh
+
+# Load OpenStack credentials
+if [ -f "/etc/os.conf" ]; then
+ . /etc/os.conf
+fi
+
+set -e
+set -x
+
+# Load our deployment config
+. /etc/mason.conf
+
+if [ ! -e ws ]; then
+ morph init ws
+fi
+cd ws
+
+definitions_repo="$DEFINITIONS_REF"/"$UPSTREAM_TROVE_ADDRESS"/baserock/baserock/definitions
+if [ ! -e "$definitions_repo" ]; then
+ morph checkout git://"$UPSTREAM_TROVE_ADDRESS"/baserock/baserock/definitions "$DEFINITIONS_REF"
+ cd "$definitions_repo"
+ git config user.name "$TROVE_ID"-mason
+ git config user.email "$TROVE_ID"-mason@$(hostname)
+else
+ cd "$definitions_repo"
+ SHA1_PREV="$(git rev-parse HEAD)"
+fi
+
+if ! git remote update origin; then
+ echo ERROR: Unable to contact trove
+ exit 42
+fi
+git clean -fxd
+git reset --hard origin/"$DEFINITIONS_REF"
+
+SHA1="$(git rev-parse HEAD)"
+
+if [ -f "$HOME/success" ] && [ "$SHA1" = "$SHA1_PREV" ]; then
+ echo INFO: No changes to "$DEFINITIONS_REF", nothing to do
+ exit 33
+fi
+
+rm -f "$HOME/success"
+
+echo INFO: Mason building: $DEFINITIONS_REF at $SHA1
+
+if ! "scripts/release-build" --no-default-configs \
+ --trove-host "$UPSTREAM_TROVE_ADDRESS" \
+ --artifact-cache-server "http://$ARTIFACT_CACHE_SERVER:8080/" \
+ --controllers "$DISTBUILD_ARCH:$DISTBUILD_CONTROLLER_ADDRESS" \
+ "$BUILD_CLUSTER_MORPHOLOGY"; then
+ echo ERROR: Failed to build release images
+ echo Build logs for chunks:
+ find builds -type f -exec echo {} \; -exec cat {} \;
+ exit 1
+fi
+
+releases_made="$(cd release && ls | wc -l)"
+if [ "$releases_made" = 0 ]; then
+ echo ERROR: No release images created
+ exit 1
+else
+ echo INFO: Created "$releases_made" release images
+fi
+
+if [ "$TEST_INFRASTRUCTURE_TYPE" = "openstack" ]; then
+ "scripts/release-test-os" \
+ --deployment-host "$DISTBUILD_ARCH":"$MASON_TEST_HOST" \
+ --trove-host "$UPSTREAM_TROVE_ADDRESS" \
+ --trove-id "$TROVE_ID" \
+ --net-id "$OPENSTACK_NETWORK_ID" \
+ "$BUILD_CLUSTER_MORPHOLOGY"
+elif [ "$TEST_INFRASTRUCTURE_TYPE" = "kvmhost" ]; then
+ "scripts/release-test" \
+ --deployment-host "$DISTBUILD_ARCH":"$MASON_TEST_HOST" \
+ --trove-host "$UPSTREAM_TROVE_ADDRESS" \
+ --trove-id "$TROVE_ID" \
+ "$BUILD_CLUSTER_MORPHOLOGY"
+fi
+
+"scripts/release-upload" --build-trove-host "$ARTIFACT_CACHE_SERVER" \
+ --arch "$DISTBUILD_ARCH" \
+ --log-level=debug --log="$HOME"/release-upload.log \
+ --public-trove-host "$UPSTREAM_TROVE_ADDRESS" \
+ --public-trove-username root \
+ --public-trove-artifact-dir /home/cache/artifacts \
+ --no-upload-release-artifacts \
+ "$BUILD_CLUSTER_MORPHOLOGY"
+
+echo INFO: Artifact upload complete for $DEFINITIONS_REF at $SHA1
+
+touch "$HOME/success"
diff --git a/extensions/mason/mason.timer b/extensions/mason/mason.timer
new file mode 100644
index 00000000..107dff97
--- /dev/null
+++ b/extensions/mason/mason.timer
@@ -0,0 +1,10 @@
+[Unit]
+Description=Runs Mason continually with 1 min between calls
+
+[Timer]
+#Time between Mason finishing and calling it again
+OnUnitActiveSec=1min
+Unit=mason.service
+
+[Install]
+WantedBy=multi-user.target
diff --git a/extensions/mason/os-init-script b/extensions/mason/os-init-script
new file mode 100644
index 00000000..77afb926
--- /dev/null
+++ b/extensions/mason/os-init-script
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+# This allows the test runner to know that cloud-init has completed the
+# disc resizing, and there is enough free space to continue.
+touch /root/cloud-init-finished
+
diff --git a/extensions/mason/share/mason.conf b/extensions/mason/share/mason.conf
new file mode 100644
index 00000000..1295ce84
--- /dev/null
+++ b/extensions/mason/share/mason.conf
@@ -0,0 +1,14 @@
+# This file is generated by the mason-setup systemd unit.
+# If you want to change the configuration, change the configuration
+# in /etc/mason/mason.conf and restart the service.
+
+ARTIFACT_CACHE_SERVER={{ ARTIFACT_CACHE_SERVER|quote }}
+UPSTREAM_TROVE_ADDRESS={{ TROVE_HOST|quote }}
+DEFINITIONS_REF={{ MASON_DEFINITIONS_REF|quote }}
+DISTBUILD_ARCH={{ MASON_DISTBUILD_ARCH|quote }}
+DISTBUILD_CONTROLLER_ADDRESS={{ CONTROLLERHOST|quote }}
+TROVE_ID={{ TROVE_ID|quote }}
+BUILD_CLUSTER_MORPHOLOGY={{ MASON_CLUSTER_MORPHOLOGY|quote }}
+MASON_TEST_HOST={{ MASON_TEST_HOST|quote }}
+TEST_INFRASTRUCTURE_TYPE={{ TEST_INFRASTRUCTURE_TYPE|quote }}
+{% if OPENSTACK_NETWORK_ID is defined %}OPENSTACK_NETWORK_ID={{ OPENSTACK_NETWORK_ID|quote }}{% endif %}
diff --git a/extensions/mason/share/os.conf b/extensions/mason/share/os.conf
new file mode 100644
index 00000000..21ef398c
--- /dev/null
+++ b/extensions/mason/share/os.conf
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# A version of this file with the relevant information included can be
+# obtained by navigating to 'Access & Security' -> 'API Access' ->
+# 'Download OpenStack RC file' in The Horizon web interface of your
+# OpenStack. However, the file obtained from there sets OS_PASSWORD
+# such that it will ask the user for a password, so you will need to
+# change that for Mason to work automatically.
+#
+# With the addition of Keystone, to use an openstack cloud you should
+# authenticate against keystone, which returns a **Token** and **Service
+# Catalog**. The catalog contains the endpoint for all services the
+# user/tenant has access to - including nova, glance, keystone, swift.
+#
+# *NOTE*: Using the 2.0 *auth api* does not mean that compute api is 2.0. We
+# will use the 1.1 *compute api*
+export OS_AUTH_URL={{ OS_AUTH_URL|quote }}
+
+# With the addition of Keystone we have standardized on the term **tenant**
+# as the entity that owns the resources.
+export OS_TENANT_ID={{ OS_TENANT_ID|quote }}
+export OS_TENANT_NAME={{ OS_TENANT_NAME|quote }}
+
+# In addition to the owning entity (tenant), openstack stores the entity
+# performing the action as the **user**.
+export OS_USERNAME={{ OS_USERNAME|quote }}
+
+# With Keystone you pass the keystone password.
+export OS_PASSWORD={{ OS_PASSWORD|quote }}
+
diff --git a/extensions/moonshot-kernel.configure b/extensions/moonshot-kernel.configure
new file mode 100644
index 00000000..11d01751
--- /dev/null
+++ b/extensions/moonshot-kernel.configure
@@ -0,0 +1,33 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configuration extension to convert a plain
+# kernel Image to uImage, for an HP Moonshot m400 cartridge
+
+set -eu
+
+case "$MOONSHOT_KERNEL" in
+ True|yes)
+ echo "Converting kernel image for Moonshot"
+ mkimage -A arm -O linux -C none -T kernel -a 0x00080000 \
+ -e 0x00080000 -n Linux -d "$1/boot/vmlinux" "$1/boot/uImage"
+ ;;
+ *)
+        echo "Unrecognised option '$MOONSHOT_KERNEL' for MOONSHOT_KERNEL" >&2
+ exit 1
+ ;;
+esac
diff --git a/extensions/nfsboot-server.configure b/extensions/nfsboot-server.configure
new file mode 100755
index 00000000..9fb48096
--- /dev/null
+++ b/extensions/nfsboot-server.configure
@@ -0,0 +1,58 @@
+#!/bin/sh
+#
+# Copyright (C) 2013-2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+#
+# This is a "morph deploy" configuration extension to set up a server for
+# booting over nfs and tftp.
+set -e
+
+ROOT="$1"
+
+##########################################################################
+
+nfsboot_root=/srv/nfsboot
+tftp_root="$nfsboot_root"/tftp
+nfs_root="$nfsboot_root"/nfs
+mkdir -p "$ROOT$tftp_root" "$ROOT$nfs_root"
+
+install -D /dev/stdin "$ROOT/usr/lib/systemd/system/nfsboot-tftp.service" <<EOF
+[Unit]
+Description=tftp service for booting kernels
+After=network.target
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/udpsvd -E 0 69 /usr/sbin/tftpd $tftp_root
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+for prefix in / /usr; do
+ for unit in nfsboot-tftp.service nfs-server.service; do
+ unit_path="${prefix}/lib/systemd/system/$unit"
+ if [ -e "$ROOT/$unit_path" ]; then
+ ln -s "../../../../$unit_path" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$unit"
+ fi
+ done
+done
+
+pxelinux_file="$ROOT/usr/share/syslinux/pxelinux.0"
+if [ -e "$pxelinux_file" ]; then
+ cp "$pxelinux_file" "$ROOT$tftp_root/pxelinux.0"
+fi
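
The enabling loop above works by symlinking units into multi-user.target.wants with a relative path, and only when the unit actually exists under / or /usr. A rough Python sketch of the same idea (the helper name and layout are illustrative, not part of the extension):

    import os

    def enable_unit(root, unit, prefixes=('/', '/usr')):
        '''Symlink a systemd unit into multi-user.target.wants, if present.'''
        wants = os.path.join(root, 'etc/systemd/system/multi-user.target.wants')
        for prefix in prefixes:
            unit_path = os.path.join(prefix, 'lib/systemd/system', unit)
            if os.path.exists(root + unit_path):
                if not os.path.isdir(wants):
                    os.makedirs(wants)
                # Relative link, matching the ln -s ../../../.. pattern above.
                os.symlink('../../../..' + unit_path,
                           os.path.join(wants, unit))
                return True
        return False
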
diff --git a/extensions/nfsboot.check b/extensions/nfsboot.check
new file mode 100755
index 00000000..e273f61c
--- /dev/null
+++ b/extensions/nfsboot.check
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''Preparatory checks for Morph 'nfsboot' write extension'''
+
+import cliapp
+import os
+
+import morphlib.writeexts
+
+
+class NFSBootCheckExtension(morphlib.writeexts.WriteExtension):
+
+ _nfsboot_root = '/srv/nfsboot'
+
+ def process_args(self, args):
+ if len(args) != 1:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ location = args[0]
+
+ upgrade = self.get_environment_boolean('UPGRADE')
+ if upgrade:
+ raise cliapp.AppException(
+ 'Upgrading is not currently supported for NFS deployments.')
+
+ hostname = os.environ.get('HOSTNAME', None)
+ if hostname is None:
+ raise cliapp.AppException('You must specify a HOSTNAME.')
+ if hostname == 'baserock':
+ raise cliapp.AppException('It is forbidden to nfsboot a system '
+ 'with hostname "%s"' % hostname)
+
+ self.test_good_server(location)
+
+ version_label = os.getenv('VERSION_LABEL', 'factory')
+ versioned_root = os.path.join(self._nfsboot_root, hostname, 'systems',
+ version_label)
+ if self.version_exists(versioned_root, location):
+ raise cliapp.AppException(
+ 'Root file system for host %s (version %s) already exists on '
+ 'the NFS server %s. Deployment aborted.' % (hostname,
+ version_label, location))
+
+ def test_good_server(self, server):
+ self.check_ssh_connectivity(server)
+
+ # Is an NFS server
+ try:
+ cliapp.ssh_runcmd(
+ 'root@%s' % server, ['test', '-e', '/etc/exports'])
+ except cliapp.AppException:
+            raise cliapp.AppException('server %s is not an NFS server'
+ % server)
+ try:
+ cliapp.ssh_runcmd(
+ 'root@%s' % server, ['systemctl', 'is-enabled',
+ 'nfs-server.service'])
+
+ except cliapp.AppException:
+            raise cliapp.AppException('server %s does not manage its '
+                                      'NFS server with systemd' % server)
+
+ # TFTP server exports /srv/nfsboot/tftp
+ tftp_root = os.path.join(self._nfsboot_root, 'tftp')
+ try:
+ cliapp.ssh_runcmd(
+            'root@%s' % server, ['test', '-d', tftp_root])
+ except cliapp.AppException:
+ raise cliapp.AppException('server %s does not export %s' %
+                                      (server, tftp_root))
+
+ def version_exists(self, versioned_root, location):
+ try:
+ cliapp.ssh_runcmd('root@%s' % location,
+ ['test', '-d', versioned_root])
+ except cliapp.AppException:
+ return False
+
+ return True
+
+
+NFSBootCheckExtension().run()
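
Every remote check above follows the same shape: run `test` on the target over ssh and treat a non-zero exit as "not present". A self-contained sketch of that pattern with plain subprocess, for readers who do not have cliapp to hand (the helper name is made up):

    import subprocess

    def remote_path_exists(host, path, flag='-e'):
        '''Return True if `test <flag> <path>` succeeds on root@host over ssh.'''
        result = subprocess.call(['ssh', 'root@%s' % host, 'test', flag, path])
        return result == 0

    # Usage, mirroring the checks in nfsboot.check:
    # remote_path_exists(server, '/etc/exports')             -> is an NFS server?
    # remote_path_exists(server, '/srv/nfsboot/tftp', '-d')  -> exports tftp root?
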
diff --git a/extensions/nfsboot.configure b/extensions/nfsboot.configure
new file mode 100755
index 00000000..6a68dc48
--- /dev/null
+++ b/extensions/nfsboot.configure
@@ -0,0 +1,30 @@
+#!/bin/sh
+# Copyright (C) 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+# Remove all networking interfaces. On nfsboot systems, eth0 is set up
+# during kernel init, and the normal ifup@eth0.service systemd unit
+# would break the NFS connection and cause the system to hang.
+
+
+set -e
+if [ "$NFSBOOT_CONFIGURE" ]; then
+ # Remove all networking interfaces but loopback
+ cat > "$1/etc/network/interfaces" <<EOF
+auto lo
+iface lo inet loopback
+EOF
+
+fi
diff --git a/extensions/nfsboot.write b/extensions/nfsboot.write
new file mode 100755
index 00000000..d928775e
--- /dev/null
+++ b/extensions/nfsboot.write
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# Copyright (C) 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''A Morph deployment write extension for deploying to an nfsboot server
+
+*** DO NOT USE ***
+- This was written before 'proper' deployment mechanisms were in place.
+  It is unlikely to work at all, and will not work correctly.
+
+Use the pxeboot write extension instead
+
+***
+
+
+
+An nfsboot server is defined as a Baserock system that has tftp and nfs
+servers running; the tftp server exports the contents of
+/srv/nfsboot/tftp/, and the user has sufficient permissions to create nfs
+roots in /srv/nfsboot/nfs/.
+
+'''
+
+
+import cliapp
+import os
+import glob
+
+import morphlib.writeexts
+
+
+class NFSBootWriteExtension(morphlib.writeexts.WriteExtension):
+
+ '''Create an NFS root and kernel on TFTP during Morph's deployment.
+
+ The location command line argument is the hostname of the nfsboot server.
+ The user is expected to provide the location argument
+ using the following syntax:
+
+ HOST
+
+ where:
+
+ * HOST is the host of the nfsboot server
+
+ The extension will connect to root@HOST via ssh to copy the kernel and
+ rootfs, and configure the nfs server.
+
+ It requires root because it uses systemd, and reads/writes to /etc.
+
+ '''
+
+ _nfsboot_root = '/srv/nfsboot'
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ temp_root, location = args
+
+ version_label = os.getenv('VERSION_LABEL', 'factory')
+ hostname = os.environ['HOSTNAME']
+
+ versioned_root = os.path.join(self._nfsboot_root, hostname, 'systems',
+ version_label)
+
+ self.copy_rootfs(temp_root, location, versioned_root, hostname)
+ self.copy_kernel(temp_root, location, versioned_root, version_label,
+ hostname)
+ self.configure_nfs(location, hostname)
+
+ def create_local_state(self, location, hostname):
+ statedir = os.path.join(self._nfsboot_root, hostname, 'state')
+ subdirs = [os.path.join(statedir, 'home'),
+ os.path.join(statedir, 'opt'),
+ os.path.join(statedir, 'srv')]
+ cliapp.ssh_runcmd('root@%s' % location,
+ ['mkdir', '-p'] + subdirs)
+
+ def copy_kernel(self, temp_root, location, versioned_root, version,
+ hostname):
+ bootdir = os.path.join(temp_root, 'boot')
+ image_names = ['vmlinuz', 'zImage', 'uImage']
+ for name in image_names:
+ try_path = os.path.join(bootdir, name)
+ if os.path.exists(try_path):
+ kernel_src = try_path
+ break
+ else:
+ raise cliapp.AppException(
+ 'Could not find a kernel in the system: none of '
+ '%s found' % ', '.join(image_names))
+
+ kernel_dest = os.path.join(versioned_root, 'orig', 'kernel')
+ rsync_dest = 'root@%s:%s' % (location, kernel_dest)
+ self.status(msg='Copying kernel')
+ cliapp.runcmd(
+ ['rsync', '-s', kernel_src, rsync_dest])
+
+ # Link the kernel to the right place
+ self.status(msg='Creating links to kernel in tftp directory')
+        tftp_dir = os.path.join(self._nfsboot_root, 'tftp')
+ versioned_kernel_name = "%s-%s" % (hostname, version)
+ kernel_name = hostname
+ try:
+ cliapp.ssh_runcmd('root@%s' % location,
+ ['ln', '-f', kernel_dest,
+ os.path.join(tftp_dir, versioned_kernel_name)])
+
+ cliapp.ssh_runcmd('root@%s' % location,
+ ['ln', '-sf', versioned_kernel_name,
+ os.path.join(tftp_dir, kernel_name)])
+ except cliapp.AppException:
+ raise cliapp.AppException('Could not create symlinks to the '
+ 'kernel at %s in %s on %s'
+ % (kernel_dest, tftp_dir, location))
+
+ def copy_rootfs(self, temp_root, location, versioned_root, hostname):
+ rootfs_src = temp_root + '/'
+ orig_path = os.path.join(versioned_root, 'orig')
+ run_path = os.path.join(versioned_root, 'run')
+
+ self.status(msg='Creating destination directories')
+ try:
+ cliapp.ssh_runcmd('root@%s' % location,
+ ['mkdir', '-p', orig_path, run_path])
+ except cliapp.AppException:
+ raise cliapp.AppException('Could not create dirs %s and %s on %s'
+ % (orig_path, run_path, location))
+
+ self.status(msg='Creating \'orig\' rootfs')
+ cliapp.runcmd(
+ ['rsync', '-asXSPH', '--delete', rootfs_src,
+ 'root@%s:%s' % (location, orig_path)])
+
+ self.status(msg='Creating \'run\' rootfs')
+ try:
+ cliapp.ssh_runcmd('root@%s' % location,
+ ['rm', '-rf', run_path])
+ cliapp.ssh_runcmd('root@%s' % location,
+ ['cp', '-al', orig_path, run_path])
+ cliapp.ssh_runcmd('root@%s' % location,
+ ['rm', '-rf', os.path.join(run_path, 'etc')])
+ cliapp.ssh_runcmd('root@%s' % location,
+ ['cp', '-a', os.path.join(orig_path, 'etc'),
+ os.path.join(run_path, 'etc')])
+ except cliapp.AppException:
+ raise cliapp.AppException('Could not create \'run\' rootfs'
+ ' from \'orig\'')
+
+ self.status(msg='Linking \'default\' to latest system')
+ try:
+ cliapp.ssh_runcmd('root@%s' % location,
+ ['ln', '-sfn', versioned_root,
+ os.path.join(self._nfsboot_root, hostname, 'systems',
+ 'default')])
+ except cliapp.AppException:
+ raise cliapp.AppException('Could not link \'default\' to %s'
+ % versioned_root)
+
+ def configure_nfs(self, location, hostname):
+ exported_path = os.path.join(self._nfsboot_root, hostname)
+ exports_path = '/etc/exports'
+ # If that path is not already exported:
+ try:
+ cliapp.ssh_runcmd(
+ 'root@%s' % location, ['grep', '-q', exported_path,
+ exports_path])
+ except cliapp.AppException:
+ ip_mask = '*'
+ options = 'rw,no_subtree_check,no_root_squash,async'
+ exports_string = '%s %s(%s)\n' % (exported_path, ip_mask, options)
+ exports_append_sh = '''\
+set -eu
+target="$1"
+temp=$(mktemp)
+cat "$target" > "$temp"
+cat >> "$temp"
+mv "$temp" "$target"
+'''
+ cliapp.ssh_runcmd(
+ 'root@%s' % location,
+ ['sh', '-c', exports_append_sh, '--', exports_path],
+ feed_stdin=exports_string)
+ cliapp.ssh_runcmd(
+ 'root@%s' % location, ['systemctl', 'restart',
+ 'nfs-server.service'])
+
+
+NFSBootWriteExtension().run()
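
configure_nfs appends the export line by piping a small shell script over ssh that rewrites /etc/exports through a temporary file, so a half-written exports file is never left behind. The same idea expressed locally in Python, as a sketch rather than the extension's own code:

    import os
    import tempfile

    def append_atomically(target, text):
        '''Append text to target by rewriting it via a temporary file.'''
        dirname = os.path.dirname(target) or '.'
        fd, temp = tempfile.mkstemp(dir=dirname)
        with os.fdopen(fd, 'w') as out:
            with open(target) as f:      # copy the existing contents first
                out.write(f.read())
            out.write(text)
        os.rename(temp, target)          # atomic on POSIX within one filesystem

    # append_atomically('/etc/exports',
    #     '/srv/nfsboot/host *(rw,no_subtree_check,no_root_squash,async)\n')
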
diff --git a/extensions/nfsboot.write.help b/extensions/nfsboot.write.help
new file mode 100644
index 00000000..186c479a
--- /dev/null
+++ b/extensions/nfsboot.write.help
@@ -0,0 +1,33 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+ *** DO NOT USE ***
+ - This was written before 'proper' deployment mechanisms were in place.
+ It is unlikely to work at all, and will not work correctly.
+
+ Use the pxeboot write extension instead
+
+ ***
+ Deploy a system image and kernel to an nfsboot server.
+
+    An nfsboot server is defined as a Baserock system that has
+    tftp and nfs servers running; the tftp server exports
+    the contents of /srv/nfsboot/tftp/, and the user has sufficient
+    permissions to create nfs roots in /srv/nfsboot/nfs/.
+
+ The `location` argument is the hostname of the nfsboot server.
+
+ The extension will connect to root@HOST via ssh to copy the
+ kernel and rootfs, and configure the nfs server.
diff --git a/extensions/openstack-ceilometer.configure b/extensions/openstack-ceilometer.configure
new file mode 100644
index 00000000..9c0b7b6d
--- /dev/null
+++ b/extensions/openstack-ceilometer.configure
@@ -0,0 +1,120 @@
+#!/bin/sh
+
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+unacceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+        unacceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool CEILOMETER_ENABLE_CONTROLLER
+check_bool CEILOMETER_ENABLE_COMPUTE
+
+if ! "$CEILOMETER_ENABLE_CONTROLLER" && \
+ ! "$CEILOMETER_ENABLE_COMPUTE"; then
+ exit 0
+fi
+
+if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \
+ -z "$CEILOMETER_SERVICE_USER" -o \
+ -z "$CEILOMETER_SERVICE_PASSWORD" -o \
+ -z "$CEILOMETER_DB_USER" -o \
+ -z "$CEILOMETER_DB_PASSWORD" -o \
+ -z "$METERING_SECRET" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" ]; then
+ echo Some options required for Ceilometer were defined, but not all.
+ exit 1
+fi
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+if "$CEILOMETER_ENABLE_COMPUTE" || "$CEILOMETER_ENABLE_CONTROLLER"; then
+ enable openstack-ceilometer-config-setup
+fi
+if "$CEILOMETER_ENABLE_COMPUTE"; then
+ enable openstack-ceilometer-compute
+fi
+if "$CEILOMETER_ENABLE_CONTROLLER"; then
+ enable openstack-ceilometer-db-setup
+ enable openstack-ceilometer-api
+ enable openstack-ceilometer-collector
+ enable openstack-ceilometer-notification
+ enable openstack-ceilometer-central
+ enable openstack-ceilometer-alarm-evaluator
+ enable openstack-ceilometer-alarm-notifier
+fi
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/ceilometer.conf"
+import os, sys, yaml
+
+ceilometer_configuration={
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+ 'CEILOMETER_SERVICE_PASSWORD': os.environ['CEILOMETER_SERVICE_PASSWORD'],
+ 'CEILOMETER_SERVICE_USER': os.environ['CEILOMETER_SERVICE_USER'],
+ 'CEILOMETER_DB_USER': os.environ['CEILOMETER_DB_USER'],
+ 'CEILOMETER_DB_PASSWORD': os.environ['CEILOMETER_DB_PASSWORD'],
+ 'METERING_SECRET': os.environ['METERING_SECRET'],
+ 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'],
+ 'RABBITMQ_USER': os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+}
+
+yaml.dump(ceilometer_configuration, sys.stdout, default_flow_style=False)
+EOF
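
The embedded Python heredoc above, which copies a fixed list of environment variables into a YAML file, is repeated with different key lists in the cinder, glance, ironic, keystone, neutron and nova extensions that follow. A generic sketch of that shared pattern (the extensions themselves keep their inline copies):

    import os
    import sys

    import yaml

    def dump_env_config(keys, stream=sys.stdout):
        '''Write the named environment variables to stream as a YAML mapping.

        Raises KeyError if any of them is missing, just as the inline
        heredocs do.
        '''
        config = dict((key, os.environ[key]) for key in keys)
        yaml.dump(config, stream, default_flow_style=False)

    # dump_env_config(['RABBITMQ_HOST', 'RABBITMQ_PORT'])  # per-service key list
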
diff --git a/extensions/openstack-cinder.configure b/extensions/openstack-cinder.configure
new file mode 100644
index 00000000..4c32e11a
--- /dev/null
+++ b/extensions/openstack-cinder.configure
@@ -0,0 +1,125 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+unacceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+        unacceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool CINDER_ENABLE_CONTROLLER
+check_bool CINDER_ENABLE_COMPUTE
+check_bool CINDER_ENABLE_STORAGE
+
+if ! "$CINDER_ENABLE_CONTROLLER" && \
+ ! "$CINDER_ENABLE_COMPUTE" && \
+ ! "$CINDER_ENABLE_STORAGE"; then
+ exit 0
+fi
+
+if [ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \
+ -z "$CINDER_DB_USER" -o \
+ -z "$CINDER_DB_PASSWORD" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" -o \
+ -z "$CINDER_SERVICE_USER" -o \
+ -z "$CINDER_SERVICE_PASSWORD" -o \
+ -z "$CINDER_DEVICE" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" ]; then
+ echo Some options required for Cinder were defined, but not all.
+ exit 1
+fi
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+if "$CINDER_ENABLE_COMPUTE" || "$CINDER_ENABLE_STORAGE"; then
+ enable iscsi-setup
+ enable target #target.service!
+ enable iscsid
+fi
+if "$CINDER_ENABLE_COMPUTE" || "$CINDER_ENABLE_CONTROLLER" || "$CINDER_ENABLE_STORAGE"; then
+ enable openstack-cinder-config-setup
+fi
+if "$CINDER_ENABLE_STORAGE"; then
+ enable openstack-cinder-lv-setup
+ enable lvm2-lvmetad
+ enable openstack-cinder-volume
+ enable openstack-cinder-backup
+ enable openstack-cinder-scheduler
+fi
+if "$CINDER_ENABLE_CONTROLLER"; then
+ enable openstack-cinder-db-setup
+ enable openstack-cinder-api
+fi
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/cinder.conf"
+import os, sys, yaml
+
+cinder_configuration={
+ 'RABBITMQ_HOST':os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_PORT':os.environ['RABBITMQ_PORT'],
+ 'RABBITMQ_USER':os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD':os.environ['RABBITMQ_PASSWORD'],
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN':os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+ 'CINDER_DB_USER':os.environ['CINDER_DB_USER'],
+ 'CINDER_DB_PASSWORD':os.environ['CINDER_DB_PASSWORD'],
+ 'CONTROLLER_HOST_ADDRESS':os.environ['CONTROLLER_HOST_ADDRESS'],
+ 'CINDER_SERVICE_USER':os.environ['CINDER_SERVICE_USER'],
+ 'CINDER_SERVICE_PASSWORD':os.environ['CINDER_SERVICE_PASSWORD'],
+ 'CINDER_DEVICE':os.environ['CINDER_DEVICE'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS':os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+}
+
+yaml.dump(cinder_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/extensions/openstack-glance.configure b/extensions/openstack-glance.configure
new file mode 100644
index 00000000..5da08895
--- /dev/null
+++ b/extensions/openstack-glance.configure
@@ -0,0 +1,101 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+unacceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+        unacceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool GLANCE_ENABLE_SERVICE
+
+if ! "$GLANCE_ENABLE_SERVICE"; then
+ exit 0
+fi
+
+if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \
+ -z "$GLANCE_SERVICE_USER" -o \
+ -z "$GLANCE_SERVICE_PASSWORD" -o \
+ -z "$GLANCE_DB_USER" -o \
+ -z "$GLANCE_DB_PASSWORD" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" ]; then
+ echo Some options required for Glance were defined, but not all.
+ exit 1
+fi
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+enable openstack-glance-setup
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/glance.conf"
+import os, sys, yaml
+
+glance_configuration={
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+ 'GLANCE_SERVICE_PASSWORD': os.environ['GLANCE_SERVICE_PASSWORD'],
+ 'GLANCE_SERVICE_USER': os.environ['GLANCE_SERVICE_USER'],
+ 'GLANCE_DB_USER': os.environ['GLANCE_DB_USER'],
+ 'GLANCE_DB_PASSWORD': os.environ['GLANCE_DB_PASSWORD'],
+ 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'],
+ 'RABBITMQ_USER': os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+}
+
+yaml.dump(glance_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/extensions/openstack-ironic.configure b/extensions/openstack-ironic.configure
new file mode 100644
index 00000000..962bbcd1
--- /dev/null
+++ b/extensions/openstack-ironic.configure
@@ -0,0 +1,155 @@
+#!/bin/sh
+
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+unacceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+        unacceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool IRONIC_ENABLE_SERVICE
+
+if ! "$IRONIC_ENABLE_SERVICE"; then
+ exit 0
+fi
+
+if [ -z "$IRONIC_SERVICE_USER" -o \
+ -z "$IRONIC_SERVICE_PASSWORD" -o \
+ -z "$IRONIC_DB_USER" -o \
+ -z "$IRONIC_DB_PASSWORD" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then
+ echo Some options required for Ironic were defined, but not all.
+ exit 1
+fi
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+enable openstack-ironic-setup
+enable iscsi-setup
+enable target #target.service!
+enable iscsid
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/ironic.conf"
+import os, sys, yaml
+
+ironic_configuration={
+ 'IRONIC_SERVICE_USER': os.environ['IRONIC_SERVICE_USER'],
+ 'IRONIC_SERVICE_PASSWORD': os.environ['IRONIC_SERVICE_PASSWORD'],
+ 'IRONIC_DB_USER': os.environ['IRONIC_DB_USER'],
+ 'IRONIC_DB_PASSWORD': os.environ['IRONIC_DB_PASSWORD'],
+ 'RABBITMQ_HOST':os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_PORT':os.environ['RABBITMQ_PORT'],
+ 'RABBITMQ_USER':os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD':os.environ['RABBITMQ_PASSWORD'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+
+}
+
+yaml.dump(ironic_configuration, sys.stdout, default_flow_style=False)
+EOF
+
+##########################################################################
+# Configure the TFTP service #
+##########################################################################
+
+tftp_root="/srv/tftp_root/" # trailing slash is essential
+mkdir -p "$ROOT/$tftp_root"
+
+install -D /dev/stdin -m 644 "$ROOT/usr/lib/systemd/system/tftp-hpa.service" << 'EOF'
+[Unit]
+Description=tftp service for booting kernels
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=simple
+EnvironmentFile=/etc/tftp-hpa.conf
+ExecStart=/usr/sbin/in.tftpd $TFTP_OPTIONS ${TFTP_ROOT}
+StandardInput=socket
+StandardOutput=inherit
+StandardError=journal
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+install -D /dev/stdin -m 644 "$ROOT/usr/lib/systemd/system/tftp-hpa.socket" << EOF
+[Unit]
+Description=Tftp server activation socket
+
+[Socket]
+ListenDatagram=$MANAGEMENT_INTERFACE_IP_ADDRESS:69
+FreeBind=yes
+
+[Install]
+WantedBy=sockets.target
+EOF
+
+install -D -m 644 /dev/stdin "$ROOT"/etc/tftp-hpa.conf << EOF
+TFTP_ROOT=$tftp_root
+TFTP_OPTIONS="-v -v -v -v -v --map-file $tftp_root/map-file"
+EOF
+
+install -D /dev/stdin -m 644 "$ROOT/$tftp_root"/map-file << EOF
+r ^([^/]) $tftp_root\1
+r ^/tftpboot/ $tftp_root\2
+EOF
+
+cp "$ROOT"/usr/share/syslinux/pxelinux.0 "$ROOT/$tftp_root"
diff --git a/extensions/openstack-keystone.configure b/extensions/openstack-keystone.configure
new file mode 100644
index 00000000..6b011b14
--- /dev/null
+++ b/extensions/openstack-keystone.configure
@@ -0,0 +1,123 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+unacceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+        unacceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool KEYSTONE_ENABLE_SERVICE
+
+if ! "$KEYSTONE_ENABLE_SERVICE"; then
+ exit 0
+fi
+
+if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \
+ -z "$KEYSTONE_ADMIN_PASSWORD" -o \
+ -z "$KEYSTONE_DB_USER" -o \
+ -z "$KEYSTONE_DB_PASSWORD" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" ]; then
+ echo Some options required for Keystone were defined, but not all.
+ exit 1
+fi
+
+python <<'EOF'
+import socket
+import sys
+import os
+
+try:
+ socket.inet_pton(socket.AF_INET, os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'])
+except:
+ print "Error: MANAGEMENT_INTERFACE_IP_ADDRESS is not a valid IP"
+ sys.exit(1)
+EOF
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+enable openstack-keystone-setup
+enable openstack-horizon-setup
+enable postgres-server-setup
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/keystone.conf"
+import os, sys, yaml
+
+keystone_configuration={
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+ 'KEYSTONE_ADMIN_PASSWORD': os.environ['KEYSTONE_ADMIN_PASSWORD'],
+ 'KEYSTONE_DB_USER': os.environ['KEYSTONE_DB_USER'],
+ 'KEYSTONE_DB_PASSWORD': os.environ['KEYSTONE_DB_PASSWORD'],
+ 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'],
+ 'RABBITMQ_USER': os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+}
+
+yaml.dump(keystone_configuration, sys.stdout, default_flow_style=False)
+EOF
+
+python << 'EOF' > "$OPENSTACK_DATA/postgres.conf"
+import os, sys, yaml
+
+postgres_configuration={
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+}
+
+yaml.dump(postgres_configuration, sys.stdout, default_flow_style=False)
+EOF
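
The inline check above validates MANAGEMENT_INTERFACE_IP_ADDRESS with socket.inet_pton and only accepts IPv4. A slightly more general sketch of the same validation that also covers IPv6; this is an illustration, not what the extension ships:

    import os
    import socket
    import sys

    def is_valid_ip(address):
        '''Return True if address parses as IPv4 or IPv6.'''
        for family in (socket.AF_INET, socket.AF_INET6):
            try:
                socket.inet_pton(family, address)
                return True
            except socket.error:
                pass
        return False

    if not is_valid_ip(os.environ.get('MANAGEMENT_INTERFACE_IP_ADDRESS', '')):
        sys.stderr.write('Error: MANAGEMENT_INTERFACE_IP_ADDRESS '
                         'is not a valid IP\n')
        sys.exit(1)
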
diff --git a/extensions/openstack-network.configure b/extensions/openstack-network.configure
new file mode 100644
index 00000000..10be5a1c
--- /dev/null
+++ b/extensions/openstack-network.configure
@@ -0,0 +1,50 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+###################
+# Enable services #
+###################
+
+enable openvswitch-setup
+enable openstack-network-setup
+
+##########################################################################
+# Generate config variable shell snippet
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/network.conf"
+import os, sys, yaml
+
+network_configuration = {}
+
+optional_keys = ('EXTERNAL_INTERFACE',)
+
+network_configuration.update((k, os.environ[k]) for k in optional_keys if k in os.environ)
+
+yaml.dump(network_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/extensions/openstack-neutron.configure b/extensions/openstack-neutron.configure
new file mode 100644
index 00000000..210222db
--- /dev/null
+++ b/extensions/openstack-neutron.configure
@@ -0,0 +1,138 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/openstack-neutron-$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-neutron-$1.service"
+}
+
+unacceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+        unacceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool NEUTRON_ENABLE_CONTROLLER
+check_bool NEUTRON_ENABLE_MANAGER
+check_bool NEUTRON_ENABLE_AGENT
+
+if ! "$NEUTRON_ENABLE_CONTROLLER" && \
+ ! "$NEUTRON_ENABLE_MANAGER" && \
+ ! "$NEUTRON_ENABLE_AGENT"; then
+ exit 0
+fi
+
+if [ -z "$NEUTRON_SERVICE_USER" -o \
+ -z "$NEUTRON_SERVICE_PASSWORD" -o \
+ -z "$NEUTRON_DB_USER" -o \
+ -z "$NEUTRON_DB_PASSWORD" -o \
+ -z "$METADATA_PROXY_SHARED_SECRET" -o \
+ -z "$NOVA_SERVICE_USER" -o \
+ -z "$NOVA_SERVICE_PASSWORD" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then
+ echo Some options required for Neutron were defined, but not all.
+ exit 1
+fi
+
+#############################################
+# Ensure /var/run is an appropriate symlink #
+#############################################
+
+if ! link="$(readlink "$ROOT/var/run")" || [ "$link" != ../run ]; then
+ rm -rf "$ROOT/var/run"
+ ln -s ../run "$ROOT/var/run"
+fi
+
+###################
+# Enable services #
+###################
+
+if "$NEUTRON_ENABLE_CONTROLLER"; then
+ enable config-setup
+ enable db-setup
+ enable server
+fi
+
+if "$NEUTRON_ENABLE_MANAGER"; then
+ enable config-setup
+ enable ovs-cleanup
+ enable dhcp-agent
+ enable l3-agent
+ enable plugin-openvswitch-agent
+ enable metadata-agent
+fi
+
+if "$NEUTRON_ENABLE_AGENT"; then
+ enable config-setup
+ enable plugin-openvswitch-agent
+fi
+
+##########################################################################
+# Generate config variable shell snippet
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/neutron.conf"
+import os, sys, yaml
+
+nova_configuration={
+ 'NEUTRON_SERVICE_USER': os.environ['NEUTRON_SERVICE_USER'],
+ 'NEUTRON_SERVICE_PASSWORD': os.environ['NEUTRON_SERVICE_PASSWORD'],
+ 'NEUTRON_DB_USER': os.environ['NEUTRON_DB_USER'],
+ 'NEUTRON_DB_PASSWORD': os.environ['NEUTRON_DB_PASSWORD'],
+ 'METADATA_PROXY_SHARED_SECRET': os.environ['METADATA_PROXY_SHARED_SECRET'],
+ 'NOVA_SERVICE_USER': os.environ['NOVA_SERVICE_USER'],
+ 'NOVA_SERVICE_PASSWORD': os.environ['NOVA_SERVICE_PASSWORD'],
+ 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_USER': os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'],
+ 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+}
+
+yaml.dump(nova_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/extensions/openstack-nova.configure b/extensions/openstack-nova.configure
new file mode 100644
index 00000000..213f1852
--- /dev/null
+++ b/extensions/openstack-nova.configure
@@ -0,0 +1,168 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/openstack-nova-$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-nova-$1.service"
+}
+
+unacceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+        unacceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool NOVA_ENABLE_CONTROLLER
+check_bool NOVA_ENABLE_COMPUTE
+
+if ! "$NOVA_ENABLE_CONTROLLER" && \
+ ! "$NOVA_ENABLE_COMPUTE"; then
+ exit 0
+fi
+
+if [ -z "$NOVA_SERVICE_USER" -o \
+ -z "$NOVA_SERVICE_PASSWORD" -o \
+ -z "$NOVA_DB_USER" -o \
+ -z "$NOVA_DB_PASSWORD" -o \
+ -z "$NOVA_VIRT_TYPE" -o \
+ -z "$NEUTRON_SERVICE_USER" -o \
+ -z "$NEUTRON_SERVICE_PASSWORD" -o \
+ -z "$IRONIC_SERVICE_USER" -a \
+ -z "$IRONIC_SERVICE_PASSWORD" -a \
+ -z "$METADATA_PROXY_SHARED_SECRET" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then
+ echo Some options required for Nova were defined, but not all.
+ exit 1
+fi
+
+###############################################
+# Enable libvirtd and libvirt-guests services #
+###############################################
+
+wants_dir="$ROOT"/usr/lib/systemd/system/multi-user.target.wants
+mkdir -p "$wants_dir"
+mkdir -p "$ROOT"/var/lock/subsys
+ln -sf ../libvirtd.service "$wants_dir/libvirtd.service"
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+if "$NOVA_ENABLE_CONTROLLER" || "$NOVA_ENABLE_COMPUTE"; then
+ enable config-setup
+fi
+if "$NOVA_ENABLE_CONTROLLER" && ! "$NOVA_ENABLE_COMPUTE"; then
+ enable conductor
+fi
+if "$NOVA_ENABLE_COMPUTE"; then
+ enable compute
+fi
+if "$NOVA_ENABLE_CONTROLLER"; then
+ for service in db-setup api cert consoleauth novncproxy scheduler serialproxy; do
+ enable "$service"
+ done
+fi
+
+##########################################################################
+# Change the IP range of the internal libvirt network to avoid clashes
+# with the eth0 IP range
+##########################################################################
+
+sed -i "s/192\.168\.122\./192\.168\.1\./g" \
+ "$ROOT"/etc/libvirt/qemu/networks/default.xml
+
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
+case "$NOVA_BAREMETAL_SCHEDULING" in
+ True|true|yes)
+ export COMPUTE_MANAGER=ironic.nova.compute.manager.ClusteredComputeManager
+ export RESERVED_HOST_MEMORY_MB=0
+ export SCHEDULER_HOST_MANAGER=nova.scheduler.ironic_host_manager.IronicHostManager
+ export RAM_ALLOCATION_RATIO=1.0
+ export COMPUTE_DRIVER=nova.virt.ironic.IronicDriver
+ ;;
+ *)
+ export COMPUTE_MANAGER=nova.compute.manager.ComputeManager
+ export RESERVED_HOST_MEMORY_MB=512
+ export SCHEDULER_HOST_MANAGER=nova.scheduler.host_manager.HostManager
+ export RAM_ALLOCATION_RATIO=1.5
+ export COMPUTE_DRIVER=libvirt.LibvirtDriver
+ ;;
+esac
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/nova.conf"
+import os, sys, yaml
+
+nova_configuration={
+ 'NOVA_SERVICE_USER': os.environ['NOVA_SERVICE_USER'],
+ 'NOVA_SERVICE_PASSWORD': os.environ['NOVA_SERVICE_PASSWORD'],
+ 'NOVA_DB_USER': os.environ['NOVA_DB_USER'],
+ 'NOVA_DB_PASSWORD': os.environ['NOVA_DB_PASSWORD'],
+ 'NOVA_VIRT_TYPE': os.environ['NOVA_VIRT_TYPE'],
+ 'COMPUTE_MANAGER': os.environ['COMPUTE_MANAGER'],
+ 'RESERVED_HOST_MEMORY_MB': os.environ['RESERVED_HOST_MEMORY_MB'],
+ 'SCHEDULER_HOST_MANAGER': os.environ['SCHEDULER_HOST_MANAGER'],
+ 'RAM_ALLOCATION_RATIO': os.environ['RAM_ALLOCATION_RATIO'],
+ 'COMPUTE_DRIVER': os.environ['COMPUTE_DRIVER'],
+ 'NEUTRON_SERVICE_USER': os.environ['NEUTRON_SERVICE_USER'],
+ 'NEUTRON_SERVICE_PASSWORD': os.environ['NEUTRON_SERVICE_PASSWORD'],
+ 'IRONIC_SERVICE_USER': os.environ['IRONIC_SERVICE_USER'],
+ 'IRONIC_SERVICE_PASSWORD': os.environ['IRONIC_SERVICE_PASSWORD'],
+ 'METADATA_PROXY_SHARED_SECRET': os.environ['METADATA_PROXY_SHARED_SECRET'],
+ 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_USER': os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'],
+ 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+}
+
+yaml.dump(nova_configuration, sys.stdout, default_flow_style=False)
+EOF
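
The NOVA_BAREMETAL_SCHEDULING case statement only chooses between two fixed parameter profiles before the YAML is generated. The same selection expressed as data, using the values from the script above (an illustration only, not the shipped logic):

    # Profiles taken from the case statement in openstack-nova.configure.
    BAREMETAL_PROFILE = {
        'COMPUTE_MANAGER': 'ironic.nova.compute.manager.ClusteredComputeManager',
        'RESERVED_HOST_MEMORY_MB': '0',
        'SCHEDULER_HOST_MANAGER':
            'nova.scheduler.ironic_host_manager.IronicHostManager',
        'RAM_ALLOCATION_RATIO': '1.0',
        'COMPUTE_DRIVER': 'nova.virt.ironic.IronicDriver',
    }
    DEFAULT_PROFILE = {
        'COMPUTE_MANAGER': 'nova.compute.manager.ComputeManager',
        'RESERVED_HOST_MEMORY_MB': '512',
        'SCHEDULER_HOST_MANAGER': 'nova.scheduler.host_manager.HostManager',
        'RAM_ALLOCATION_RATIO': '1.5',
        'COMPUTE_DRIVER': 'libvirt.LibvirtDriver',
    }

    def select_profile(baremetal_scheduling):
        '''Mirror the shell case: True/true/yes selects the Ironic profile.'''
        if baremetal_scheduling in ('True', 'true', 'yes'):
            return BAREMETAL_PROFILE
        return DEFAULT_PROFILE
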
diff --git a/extensions/openstack-swift-controller.configure b/extensions/openstack-swift-controller.configure
new file mode 100644
index 00000000..424ab57b
--- /dev/null
+++ b/extensions/openstack-swift-controller.configure
@@ -0,0 +1,49 @@
+#!/bin/bash
+#
+# Copyright © 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+set -e
+
+export ROOT="$1"
+
+MANDATORY_OPTIONS="SWIFT_ADMIN_PASSWORD KEYSTONE_TEMPORARY_ADMIN_TOKEN"
+
+for option in $MANDATORY_OPTIONS
+do
+ if ! [[ -v $option ]]
+ then
+ missing_option=True
+ echo "Required option $option isn't set!" >&2
+ fi
+done
+
+if [[ $missing_option = True ]]; then exit 1; fi
+
+mkdir -p "$ROOT/usr/lib/systemd/system/multi-user.target.wants" # ensure this exists before we make symlinks
+
+ln -s "/usr/lib/systemd/system/swift-controller-setup.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-controller-setup.service"
+ln -s "/usr/lib/systemd/system/memcached.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/memcached.service"
+ln -s "/usr/lib/systemd/system/openstack-swift-proxy.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-proxy.service"
+
+cat << EOF > "$ROOT"/usr/share/openstack/swift-controller-vars.yml
+---
+SWIFT_ADMIN_PASSWORD: $SWIFT_ADMIN_PASSWORD
+MANAGEMENT_INTERFACE_IP_ADDRESS: $MANAGEMENT_INTERFACE_IP_ADDRESS
+KEYSTONE_TEMPORARY_ADMIN_TOKEN: $KEYSTONE_TEMPORARY_ADMIN_TOKEN
+EOF
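
swift-controller-vars.yml is written with an unquoted heredoc, so a password containing YAML metacharacters (':', '#', leading or trailing spaces) would produce a surprising or invalid file. A sketch of the same step with yaml.safe_dump, which quotes values as needed; this is an alternative illustration, not the shipped script:

    import os

    import yaml

    variables = dict(
        (key, os.environ[key])
        for key in ('SWIFT_ADMIN_PASSWORD',
                    'MANAGEMENT_INTERFACE_IP_ADDRESS',
                    'KEYSTONE_TEMPORARY_ADMIN_TOKEN'))

    # Relative path used here purely for the sketch.
    with open('swift-controller-vars.yml', 'w') as f:
        f.write('---\n')
        yaml.safe_dump(variables, f, default_flow_style=False)
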
diff --git a/extensions/openstack.check b/extensions/openstack.check
new file mode 100755
index 00000000..a3379763
--- /dev/null
+++ b/extensions/openstack.check
@@ -0,0 +1,90 @@
+#!/usr/bin/python
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''Preparatory checks for Morph 'openstack' write extension'''
+
+import cliapp
+import os
+import urlparse
+import keystoneclient
+
+import morphlib.writeexts
+
+
+class OpenStackCheckExtension(morphlib.writeexts.WriteExtension):
+
+ def process_args(self, args):
+ if len(args) != 1:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ self.require_btrfs_in_deployment_host_kernel()
+
+ upgrade = self.get_environment_boolean('UPGRADE')
+ if upgrade:
+ raise cliapp.AppException(
+ 'Use the `ssh-rsync` write extension to deploy upgrades to an '
+ 'existing remote system.')
+
+ location = args[0]
+ self.check_location(location)
+
+ self.check_imagename()
+ self.check_openstack_parameters(self._get_auth_parameters(location))
+
+ def _get_auth_parameters(self, location):
+        '''Check the environment variables needed and return them all.
+
+ The environment variables are described in the class documentation.
+ '''
+
+ auth_keys = {'OPENSTACK_USER': 'username',
+ 'OPENSTACK_TENANT': 'tenant_name',
+ 'OPENSTACK_PASSWORD': 'password'}
+
+ for key in auth_keys:
+ if os.environ.get(key, '') == '':
+ raise cliapp.AppException(key + ' was not given')
+
+ auth_params = {auth_keys[key]: os.environ[key] for key in auth_keys}
+ auth_params['auth_url'] = location
+ return auth_params
+
+ def check_imagename(self):
+ if os.environ.get('OPENSTACK_IMAGENAME', '') == '':
+ raise cliapp.AppException('OPENSTACK_IMAGENAME was not given')
+
+ def check_location(self, location):
+ x = urlparse.urlparse(location)
+ if x.scheme not in ['http', 'https']:
+            raise cliapp.AppException('URL scheme must be http or https in %s'\
+ % location)
+ if (x.path != '/v2.0' and x.path != '/v2.0/'):
+ raise cliapp.AppException('API version must be v2.0 in %s'\
+ % location)
+
+ def check_openstack_parameters(self, auth_params):
+ ''' Check that we can connect to and authenticate with openstack '''
+
+ self.status(msg='Checking OpenStack credentials...')
+
+ try:
+ keystoneclient.v2_0.Client(**auth_params)
+ except keystoneclient.exceptions.Unauthorized:
+ errmsg = ('Failed to authenticate with OpenStack '
+ '(are your credentials correct?)')
+ raise cliapp.AppException(errmsg)
+
+
+OpenStackCheckExtension().run()
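
check_location accepts only http(s) URLs whose path is exactly the Keystone v2.0 endpoint. A short usage sketch of that rule (the URLs are made-up examples):

    import urlparse   # urllib.parse on Python 3

    def location_is_valid(location):
        parts = urlparse.urlparse(location)
        return (parts.scheme in ('http', 'https')
                and parts.path in ('/v2.0', '/v2.0/'))

    print(location_is_valid('http://openstack.example.com:5000/v2.0'))   # True
    print(location_is_valid('ftp://openstack.example.com:5000/v2.0'))    # False
    print(location_is_valid('http://openstack.example.com:5000/v3'))     # False
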
diff --git a/extensions/openstack.write b/extensions/openstack.write
new file mode 100755
index 00000000..67e07c18
--- /dev/null
+++ b/extensions/openstack.write
@@ -0,0 +1,93 @@
+#!/usr/bin/python
+# Copyright (C) 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''A Morph deployment write extension for deploying to OpenStack.'''
+
+
+import cliapp
+import os
+import tempfile
+import urlparse
+
+import morphlib.writeexts
+
+
+class OpenStackWriteExtension(morphlib.writeexts.WriteExtension):
+
+ '''See openstack.write.help for documentation'''
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ temp_root, location = args
+
+ os_params = self.get_openstack_parameters()
+
+ fd, raw_disk = tempfile.mkstemp()
+ os.close(fd)
+ self.create_local_system(temp_root, raw_disk)
+ self.status(msg='Temporary disk image has been created at %s'
+ % raw_disk)
+
+ self.set_extlinux_root_to_virtio(raw_disk)
+
+ self.configure_openstack_image(raw_disk, location, os_params)
+
+ def set_extlinux_root_to_virtio(self, raw_disk):
+ '''Re-configures extlinux to use virtio disks'''
+ self.status(msg='Updating extlinux.conf')
+ with self.mount(raw_disk) as mp:
+ path = os.path.join(mp, 'extlinux.conf')
+
+ with open(path) as f:
+ extlinux_conf = f.read()
+
+ extlinux_conf = extlinux_conf.replace('root=/dev/sda',
+ 'root=/dev/vda')
+ with open(path, "w") as f:
+ f.write(extlinux_conf)
+
+ def get_openstack_parameters(self):
+ '''Get the environment variables needed.
+
+ The environment variables are described in the class documentation.
+ '''
+
+ keys = ('OPENSTACK_USER', 'OPENSTACK_TENANT',
+ 'OPENSTACK_IMAGENAME', 'OPENSTACK_PASSWORD')
+ return (os.environ[key] for key in keys)
+
+ def configure_openstack_image(self, raw_disk, auth_url, os_params):
+ '''Configure the image in OpenStack using glance-client'''
+ self.status(msg='Configuring OpenStack image...')
+
+ username, tenant_name, image_name, password = os_params
+ cmdline = ['glance',
+ '--os-username', username,
+ '--os-tenant-name', tenant_name,
+ '--os-password', password,
+ '--os-auth-url', auth_url,
+ 'image-create',
+ '--name=%s' % image_name,
+ '--disk-format=raw',
+ '--container-format', 'bare',
+ '--file', raw_disk]
+ cliapp.runcmd(cmdline)
+
+ self.status(msg='Image configured.')
+
+OpenStackWriteExtension().run()
diff --git a/extensions/openstack.write.help b/extensions/openstack.write.help
new file mode 100644
index 00000000..26983060
--- /dev/null
+++ b/extensions/openstack.write.help
@@ -0,0 +1,51 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+ Deploy a Baserock system as a *new* OpenStack virtual machine.
+ (Use the `ssh-rsync` write extension to deploy upgrades to an *existing*
+ VM)
+
+ Deploys the system to the OpenStack host using python-glanceclient.
+
+ Parameters:
+
+ * location: the authentication url of the OpenStack server using the
+ following syntax:
+
+ http://HOST:PORT/VERSION
+
+ where
+
+ * HOST is the host running OpenStack
+      * PORT is the port on which OpenStack serves authentication requests.
+      * VERSION is the OpenStack authentication API version (only v2.0
+        is supported)
+
+ * OPENSTACK_USER=username: the username to use in the `--os-username`
+ argument to `glance`.
+
+ * OPENSTACK_TENANT=tenant: the project name to use in the
+ `--os-tenant-name` argument to `glance`.
+
+ * OPENSTACK_IMAGENAME=imagename: the name of the image to use in the
+ `--name` argument to `glance`.
+
+ * OPENSTACK_PASSWORD=password: the password of the OpenStack user. (We
+ recommend passing this on the command-line, rather than setting an
+      environment variable or storing it in a cluster definition file.)
+
+ (See `morph help deploy` for details of how to pass parameters to write
+ extensions)
diff --git a/extensions/pxeboot.check b/extensions/pxeboot.check
new file mode 100755
index 00000000..611708a9
--- /dev/null
+++ b/extensions/pxeboot.check
@@ -0,0 +1,86 @@
+#!/usr/bin/python
+
+import itertools
+import os
+import subprocess
+import sys
+flatten = itertools.chain.from_iterable
+
+def powerset(iterable):
+ "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
+ s = list(iterable)
+ return flatten(itertools.combinations(s, r) for r in range(len(s)+1))
+
+valid_option_sets = frozenset((
+ ('spawn-novlan', frozenset(('PXEBOOT_DEPLOYER_INTERFACE',))),
+ ('spawn-vlan', frozenset(('PXEBOOT_DEPLOYER_INTERFACE', 'PXEBOOT_VLAN'))),
+ ('existing-dhcp', frozenset(('PXEBOOT_DEPLOYER_INTERFACE',
+ 'PXEBOOT_CONFIG_TFTP_ADDRESS'))),
+ ('existing-server', frozenset(('PXEBOOT_CONFIG_TFTP_ADDRESS',
+ 'PXEBOOT_ROOTFS_RSYNC_ADDRESS'))),
+))
+valid_modes = frozenset(mode for mode, opt_set in valid_option_sets)
+
+
+def compute_matches(env):
+ complete_matches = set()
+ for mode, opt_set in valid_option_sets:
+ if all(k in env for k in opt_set):
+ complete_matches.add(opt_set)
+ return complete_matches
+
+complete_matches = compute_matches(os.environ)
+
+def word_separate_options(options):
+ assert options
+ s = options.pop(-1)
+ if options:
+ s = '%s and %s' % (', '.join(options), s)
+ return s
+
+
+valid_options = frozenset(flatten(opt_set for (mode, opt_set)
+ in valid_option_sets))
+matched_options = frozenset(o for o in valid_options
+ if o in os.environ)
+if not complete_matches:
+    addable_sets = frozenset(frozenset(opt_set) - matched_options
+                             for (mode, opt_set) in valid_option_sets
+                             if frozenset(opt_set) - matched_options)
+ print('Please provide %s' % ' or '.join(
+ word_separate_options(list(opt_set))
+ for opt_set in addable_sets if opt_set))
+ sys.exit(1)
+elif len(complete_matches) > 1:
+ removable_sets = frozenset(matched_options - frozenset(os) for os in
+ powerset(matched_options)
+ if len(compute_matches(os)) == 1)
+ print('WARNING: Following options might not be needed: %s' % ' or '.join(
+ word_separate_options(list(opt_set))
+ for opt_set in removable_sets if opt_set))
+
+if 'PXEBOOT_MODE' in os.environ:
+ mode = os.environ['PXEBOOT_MODE']
+else:
+ try:
+ mode, = (mode for (mode, opt_set) in valid_option_sets
+ if all(o in os.environ for o in opt_set))
+
+ except ValueError as e:
+        print('More than one candidate for PXEBOOT_MODE, please '
+              'set a value for it. Type `morph help pxeboot.write` for '
+              'more info')
+ sys.exit(1)
+
+if mode not in valid_modes:
+ print('%s is not a valid PXEBOOT_MODE' % mode)
+ sys.exit(1)
+
+if mode != 'existing-server':
+ with open(os.devnull, 'w') as devnull:
+ if subprocess.call(['systemctl', 'is-active', 'nfs-server'],
+ stdout=devnull) != 0:
+            print('ERROR: nfs-server.service is not running and is needed '
+                  'for this deployment. Please run `systemctl start '
+                  'nfs-server` and try `morph deploy` again.')
+ sys.exit(1)
diff --git a/extensions/pxeboot.write b/extensions/pxeboot.write
new file mode 100644
index 00000000..3a12ebcc
--- /dev/null
+++ b/extensions/pxeboot.write
@@ -0,0 +1,755 @@
+#!/usr/bin/env python
+
+
+import collections
+import contextlib
+import errno
+import itertools
+import logging
+import os
+import select
+import signal
+import shutil
+import socket
+import string
+import StringIO
+import subprocess
+import sys
+import tempfile
+import textwrap
+import urlparse
+import warnings
+
+import cliapp
+
+import morphlib
+
+
+def _int_to_quad_dot(i):
+ return '.'.join((
+ str(i >> 24 & 0xff),
+ str(i >> 16 & 0xff),
+ str(i >> 8 & 0xff),
+ str(i & 0xff)))
+
+
+def _quad_dot_to_int(s):
+ i = 0
+ for octet in s.split('.'):
+ i <<= 8
+ i += int(octet, 10)
+ return i
+
+
+def _netmask_to_prefixlen(mask):
+ bs = '{:032b}'.format(mask)
+ prefix = bs.rstrip('0')
+ if '0' in prefix:
+ raise ValueError('abnormal netmask: %s' %
+ _int_to_quad_dot(mask))
+ return len(prefix)
+
+
+def _get_routes():
+ routes = []
+ with open('/proc/net/route', 'r') as f:
+ for line in list(f)[1:]:
+ fields = line.split()
+ destination, flags, mask = fields[1], fields[3], fields[7]
+ flags = int(flags, 16)
+ if flags & 2:
+ # default route, ignore
+ continue
+ destination = socket.ntohl(int(destination, 16))
+ mask = socket.ntohl(int(mask, 16))
+ prefixlen = _netmask_to_prefixlen(mask)
+ routes.append((destination, prefixlen))
+ return routes
+
+
+class IPRange(object):
+ def __init__(self, prefix, prefixlen):
+ self.prefixlen = prefixlen
+ mask = (1 << prefixlen) - 1
+ self.mask = mask << (32 - prefixlen)
+ self.prefix = prefix & self.mask
+ @property
+ def bitstring(self):
+ return ('{:08b}' * 4).format(
+ self.prefix >> 24 & 0xff,
+ self.prefix >> 16 & 0xff,
+ self.prefix >> 8 & 0xff,
+ self.prefix & 0xff
+ )[:self.prefixlen]
+ def startswith(self, other_range):
+ return self.bitstring.startswith(other_range.bitstring)
+
+
+def find_subnet(valid_ranges, invalid_ranges):
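+    '''Find a free subnet inside one of valid_ranges.
+
+    invalid_ranges are subnets that are already in use (here, taken from
+    the routing table). The first candidate subnet with room for at least
+    four addresses whose prefix clashes with none of them is returned as
+    a (prefix, prefixlen) pair; None is returned if no space is left.
+    '''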
+ for vr in valid_ranges:
+ known_subnets = set(ir for ir in invalid_ranges if ir.startswith(vr))
+ prefixlens = set(r.prefixlen for r in known_subnets)
+ prefixlens.add(32 - 2) # need at least 4 addresses in subnet
+ prefixlen = min(prefixlens)
+ if prefixlen <= vr.prefixlen:
+ # valid subnet is full, move on to next
+ continue
+ subnetlen = prefixlen - vr.prefixlen
+ for prefix in (subnetid + vr.prefix
+ for subnetid in xrange(1 << subnetlen)):
+ if any(subnet.prefix == prefix for subnet in known_subnets):
+ continue
+ return prefix, prefixlen
+
+
+def _normalise_macaddr(macaddr):
+ '''pxelinux.0 wants the mac address to be lowercase and - separated'''
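+    # e.g. '00:1A:2B:3C:4D:5E' -> '00-1a-2b-3c-4d-5e'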
+ digits = (c for c in macaddr.lower() if c in string.hexdigits)
+ nibble_pairs = grouper(digits, 2)
+ return '-'.join(''.join(byte) for byte in nibble_pairs)
+
+
+@contextlib.contextmanager
+def executor(target_pid):
+ 'Kills a process if its parent dies'
+ read_fd, write_fd = os.pipe()
+ helper_pid = os.fork()
+ if helper_pid == 0:
+ try:
+ os.close(write_fd)
+ while True:
+ rlist, _, _ = select.select([read_fd], [], [])
+ if read_fd in rlist:
+ d = os.read(read_fd, 1)
+ if not d:
+ os.kill(target_pid, signal.SIGKILL)
+ if d in ('', 'Q'):
+ os._exit(0)
+ else:
+ os._exit(1)
+ except BaseException as e:
+ import traceback
+ traceback.print_exc()
+ os._exit(1)
+ os.close(read_fd)
+ yield
+ os.write(write_fd, 'Q')
+ os.close(write_fd)
+
+
+def grouper(iterable, n, fillvalue=None):
+ "Collect data into fixed-length chunks or blocks"
+ # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx"
+ args = [iter(iterable)] * n
+ return itertools.izip_longest(*args, fillvalue=fillvalue)
+
+
+class PXEBoot(morphlib.writeexts.WriteExtension):
+ @contextlib.contextmanager
+ def _vlan(self, interface, vlan):
+ viface = '%s.%s' % (interface, vlan)
+ self.status(msg='Creating vlan %(viface)s', viface=viface)
+ subprocess.check_call(['vconfig', 'add', interface, str(vlan)])
+ try:
+ yield viface
+ finally:
+ self.status(msg='Destroying vlan %(viface)s', viface=viface)
+ subprocess.call(['vconfig', 'rem', viface])
+
+ @contextlib.contextmanager
+ def _static_ip(self, iface):
+ valid_ranges = set((
+ IPRange(_quad_dot_to_int('192.168.0.0'), 16),
+ IPRange(_quad_dot_to_int('172.16.0.0'), 12),
+ IPRange(_quad_dot_to_int('10.0.0.0'), 8),
+ ))
+ invalid_ranges = set(IPRange(prefix, prefixlen)
+ for (prefix, prefixlen) in _get_routes())
+ prefix, prefixlen = find_subnet(valid_ranges, invalid_ranges)
+ netaddr = prefix
+ dhcp_server_ip = netaddr + 1
+ client_ip = netaddr + 2
+ broadcast_ip = prefix | ((1 << (32 - prefixlen)) - 1)
+ self.status(msg='Assigning ip address %(ip)s/%(prefixlen)d to '
+ 'iface %(iface)s',
+ ip=_int_to_quad_dot(dhcp_server_ip), prefixlen=prefixlen,
+ iface=iface)
+ subprocess.check_call(['ip', 'addr', 'add',
+ '{}/{}'.format(_int_to_quad_dot(dhcp_server_ip),
+ prefixlen),
+ 'broadcast', _int_to_quad_dot(broadcast_ip),
+ 'scope', 'global',
+ 'dev', iface])
+ try:
+ yield (dhcp_server_ip, client_ip, broadcast_ip)
+ finally:
+ self.status(msg='Removing ip addresses from iface %(iface)s',
+ iface=iface)
+ subprocess.call(['ip', 'addr', 'flush', 'dev', iface])
+
+ @contextlib.contextmanager
+ def _up_interface(self, iface):
+ self.status(msg='Bringing interface %(iface)s up', iface=iface)
+ subprocess.check_call(['ip', 'link', 'set', iface, 'up'])
+ try:
+ yield
+ finally:
+ self.status(msg='Bringing interface %(iface)s down', iface=iface)
+ subprocess.call(['ip', 'link', 'set', iface, 'down'])
+
+ @contextlib.contextmanager
+ def static_ip(self, interface):
+ with self._static_ip(iface=interface) as (host_ip, client_ip,
+ broadcast_ip), \
+ self._up_interface(iface=interface):
+ yield (_int_to_quad_dot(host_ip),
+ _int_to_quad_dot(client_ip),
+ _int_to_quad_dot(broadcast_ip))
+
+ @contextlib.contextmanager
+ def vlan(self, interface, vlan):
+ with self._vlan(interface=interface, vlan=vlan) as viface, \
+ self.static_ip(interface=viface) \
+ as (host_ip, client_ip, broadcast_ip):
+ yield host_ip, client_ip, broadcast_ip
+
+ @contextlib.contextmanager
+ def _tempdir(self):
+ td = tempfile.mkdtemp()
+ print 'Created tempdir:', td
+ try:
+ yield td
+ finally:
+ shutil.rmtree(td, ignore_errors=True)
+
+ @contextlib.contextmanager
+ def _remote_tempdir(self, hostname, template):
+ persist = os.environ.get('PXE_INSTALLER') in ('no', 'False')
+ td = cliapp.ssh_runcmd(hostname, ['mktemp', '-d', template]).strip()
+ try:
+ yield td
+ finally:
+ if not persist:
+ cliapp.ssh_runcmd(hostname, ['find', td, '-delete'])
+
+ def _serve_tftpd(self, sock, host, port, interface, tftproot):
+ self.settings.progname = 'tftp server'
+ self._set_process_name()
+ while True:
+ logging.debug('tftpd waiting for connections')
+ # recvfrom with MSG_PEEK is how you accept UDP connections
+ _, peer = sock.recvfrom(0, socket.MSG_PEEK)
+ conn = sock
+ logging.debug('Connecting socket to peer: ' + repr(peer))
+ conn.connect(peer)
+ # The existing socket is now only serving that peer, so we need to
+ # bind a new UDP socket to the wildcard address, which needs the
+ # port to be in REUSEADDR mode.
+ conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ logging.debug('Binding replacement socket to ' + repr((host, port)))
+ sock.bind((host, port))
+
+ logging.debug('tftpd server handing connection to tftpd')
+ tftpd_serve = ['tftpd', '-rl', tftproot]
+ ret = subprocess.call(args=tftpd_serve, stdin=conn,
+ stdout=conn, stderr=None, close_fds=True)
+ # It's handy to turn off REUSEADDR after the rebinding,
+ # so we can protect against future bind attempts on this port.
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 0)
+ logging.debug('tftpd exited %d' % ret)
+ os._exit(0)
+
+ @contextlib.contextmanager
+ def _spawned_tftp_server(self, tftproot, host_ip, interface, tftp_port=0):
+ # inetd-style launchers tend to bind UDP ports with SO_REUSEADDR,
+        # because they need to have multiple ports bound, one for receiving
+ # all connection attempts on that port, and one for each concurrent
+ # connection with a peer
+ # this makes detecting whether there's a tftpd running difficult, so
+ # we'll instead use an ephemeral port and configure the PXE boot to
+ # use that tftp server for the kernel
+ s = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
+ s.bind((host_ip, tftp_port))
+ host, port = s.getsockname()
+ self.status(msg='Bound listen socket to %(host)s, %(port)s',
+ host=host, port=port)
+ pid = os.fork()
+ if pid == 0:
+ try:
+ self._serve_tftpd(sock=s, host=host, port=port,
+ interface=interface, tftproot=tftproot)
+ except BaseException as e:
+ import traceback
+ traceback.print_exc()
+ os._exit(1)
+ s.close()
+ with executor(pid):
+ try:
+ yield port
+ finally:
+ self.status(msg='Killing tftpd listener pid=%(pid)d',
+ pid=pid)
+ os.kill(pid, signal.SIGKILL)
+
+ @contextlib.contextmanager
+ def tftp_server(self, host_ip, interface, tftp_port=0):
+ with self._tempdir() as tftproot, \
+ self._spawned_tftp_server(tftproot=tftproot, host_ip=host_ip,
+ interface=interface,
+ tftp_port=tftp_port) as tftp_port:
+ self.status(msg='Serving tftp root %(tftproot)s, on port %(port)d',
+ port=tftp_port, tftproot=tftproot)
+ yield tftp_port, tftproot
+
+ @contextlib.contextmanager
+ def _local_copy(self, src, dst):
+ self.status(msg='Installing %(src)s to %(dst)s',
+ src=src, dst=dst)
+ shutil.copy2(src=src, dst=dst)
+ try:
+ yield
+ finally:
+ self.status(msg='Removing %(dst)s', dst=dst)
+ os.unlink(dst)
+
+ @contextlib.contextmanager
+ def _local_symlink(self, src, dst):
+ os.symlink(src, dst)
+ try:
+ yield
+ finally:
+ os.unlink(dst)
+
+ def local_pxelinux(self, tftproot):
+ return self._local_copy('/usr/share/syslinux/pxelinux.0',
+ os.path.join(tftproot, 'pxelinux.0'))
+
+ def local_kernel(self, rootfs, tftproot):
+ return self._local_copy(os.path.join(rootfs, 'boot/vmlinuz'),
+ os.path.join(tftproot, 'kernel'))
+
+ @contextlib.contextmanager
+ def _remote_copy(self, hostname, src, dst):
+ persist = os.environ.get('PXE_INSTALLER') in ('no', 'False')
+ with open(src, 'r') as f:
+ cliapp.ssh_runcmd(hostname,
+ ['install', '-D', '-m644', '/proc/self/fd/0',
+ dst], stdin=f, stdout=None, stderr=None)
+ try:
+ yield
+ finally:
+ if not persist:
+ cliapp.ssh_runcmd(hostname, ['rm', dst])
+
+ @contextlib.contextmanager
+ def _remote_symlink(self, hostname, src, dst):
+ persist = os.environ.get('PXE_INSTALLER') in ('no', 'False')
+ cliapp.ssh_runcmd(hostname,
+ ['ln', '-s', '-f', src, dst],
+ stdin=None, stdout=None, stderr=None)
+ try:
+ yield
+ finally:
+ if not persist:
+ cliapp.ssh_runcmd(hostname, ['rm', '-f', dst])
+
+ @contextlib.contextmanager
+ def remote_kernel(self, rootfs, tftp_url, macaddr):
+ for name in ('vmlinuz', 'zImage', 'uImage'):
+ kernel_path = os.path.join(rootfs, 'boot', name)
+ if os.path.exists(kernel_path):
+ break
+ else:
+ raise cliapp.AppException('Failed to locate kernel')
+ url = urlparse.urlsplit(tftp_url)
+ basename = '{}-kernel'.format(_normalise_macaddr(macaddr))
+ target_path = os.path.join(url.path, basename)
+ with self._remote_copy(hostname=url.hostname, src=kernel_path,
+ dst=target_path):
+ yield basename
+
+ @contextlib.contextmanager
+ def remote_fdt(self, rootfs, tftp_url, macaddr):
+ fdt_rel_path = os.environ.get('DTB_PATH', '')
+        if fdt_rel_path == '':
+            yield
+            return
+        fdt_abs_path = os.path.join(rootfs, fdt_rel_path)
+        if not os.path.exists(fdt_abs_path):
+            raise cliapp.AppException('Failed to locate Flattened Device Tree')
+ url = urlparse.urlsplit(tftp_url)
+ basename = '{}-fdt'.format(_normalise_macaddr(macaddr))
+ target_path = os.path.join(url.path, basename)
+ with self._remote_copy(hostname=url.hostname, src=fdt_abs_path,
+ dst=target_path):
+ yield basename
+
+ @contextlib.contextmanager
+ def local_nfsroot(self, rootfs, target_ip):
+ nfsroot = target_ip + ':' + rootfs
+ self.status(msg='Exporting %(nfsroot)s as local nfsroot',
+ nfsroot=nfsroot)
+ cliapp.runcmd(['exportfs', '-o', 'ro,insecure,no_root_squash',
+ nfsroot])
+ try:
+ yield
+ finally:
+ self.status(msg='Removing %(nfsroot)s from local nfsroots',
+ nfsroot=nfsroot)
+ cliapp.runcmd(['exportfs', '-u', nfsroot])
+
+ @contextlib.contextmanager
+ def remote_nfsroot(self, rootfs, rsync_url, macaddr):
+ url = urlparse.urlsplit(rsync_url)
+ template = os.path.join(url.path,
+ _normalise_macaddr(macaddr) + '.XXXXXXXXXX')
+ with self._remote_tempdir(hostname=url.hostname, template=template) \
+ as tempdir:
+ nfsroot = urlparse.urlunsplit((url.scheme, url.netloc, tempdir,
+ url.query, url.fragment))
+ cliapp.runcmd(['rsync', '-asSPH', '--delete', rootfs, nfsroot],
+ stdin=None, stdout=open(os.devnull, 'w'),
+ stderr=None)
+ yield os.path.join(os.path.basename(tempdir),
+ os.path.basename(rootfs))
+
+ @staticmethod
+ def _write_pxe_config(fh, kernel_tftp_url, rootfs_nfs_url, device=None,
+ fdt_subpath=None, extra_args=''):
+
+ if device is None:
+ ip_cfg = "ip=dhcp"
+ else:
+ ip_cfg = "ip=:::::{device}:dhcp::".format(device=device)
+
+ fh.write(textwrap.dedent('''\
+ DEFAULT default
+ LABEL default
+ LINUX {kernel_url}
+ APPEND root=/dev/nfs {ip_cfg} nfsroot={rootfs_nfs_url} {extra_args}
+ ''').format(kernel_url=kernel_tftp_url, ip_cfg=ip_cfg,
+ rootfs_nfs_url=rootfs_nfs_url, extra_args=extra_args))
+ if fdt_subpath is not None:
+ fh.write("FDT {}\n".format(fdt_subpath))
+ fh.flush()
+
+ @contextlib.contextmanager
+ def local_pxeboot_config(self, tftproot, macaddr, ip, tftp_port,
+ nfsroot_dir, device=None):
+ kernel_tftp_url = 'tftp://{}:{}/kernel'.format(ip, tftp_port)
+ rootfs_nfs_url = '{}:{}'.format(ip, nfsroot_dir)
+ pxe_cfg_filename = _normalise_macaddr(macaddr)
+ pxe_cfg_path = os.path.join(tftproot, 'pxelinux.cfg', pxe_cfg_filename)
+ os.makedirs(os.path.dirname(pxe_cfg_path))
+ with open(pxe_cfg_path, 'w') as f:
+ self._write_pxe_config(fh=f, kernel_tftp_url=kernel_tftp_url,
+ rootfs_nfs_url=rootfs_nfs_url,
+ device=device,
+ extra_args=os.environ.get('KERNEL_ARGS',''))
+
+ try:
+ with self._local_symlink(
+ src=pxe_cfg_filename,
+ dst=os.path.join(tftproot,
+ 'pxelinux.cfg',
+ '01-' + pxe_cfg_filename)):
+ yield
+ finally:
+ os.unlink(pxe_cfg_path)
+
+ @contextlib.contextmanager
+ def remote_pxeboot_config(self, tftproot, kernel_tftproot, kernel_subpath,
+ fdt_subpath, rootfs_nfsroot, rootfs_subpath,
+ macaddr):
+ rootfs_nfs_url = '{}/{}'.format(rootfs_nfsroot, rootfs_subpath)
+ url = urlparse.urlsplit(kernel_tftproot)
+ kernel_tftp_url = '{}:{}'.format(url.netloc, kernel_subpath)
+ pxe_cfg_filename = _normalise_macaddr(macaddr)
+ url = urlparse.urlsplit(tftproot)
+ inst_cfg_path = os.path.join(url.path, 'pxelinux.cfg')
+ with tempfile.NamedTemporaryFile() as f:
+ self._write_pxe_config(
+ fh=f, kernel_tftp_url=kernel_tftp_url,
+ fdt_subpath=fdt_subpath,
+ rootfs_nfs_url=rootfs_nfs_url,
+ extra_args=os.environ.get('KERNEL_ARGS',''))
+ with self._remote_copy(
+ hostname=url.hostname, src=f.name,
+ dst=os.path.join(inst_cfg_path,
+ pxe_cfg_filename)), \
+ self._remote_symlink(
+ hostname=url.hostname,
+ src=pxe_cfg_filename,
+ dst=os.path.join(inst_cfg_path,
+ '01-' + pxe_cfg_filename)):
+ yield
+
+ @contextlib.contextmanager
+ def dhcp_server(self, interface, host_ip, target_ip, broadcast_ip):
+ with self._tempdir() as td:
+ leases_path = os.path.join(td, 'leases')
+ config_path = os.path.join(td, 'config')
+ stdout_path = os.path.join(td, 'stdout')
+ stderr_path = os.path.join(td, 'stderr')
+ pidfile_path = os.path.join(td, 'pid')
+ with open(config_path, 'w') as f:
+ f.write(textwrap.dedent('''\
+ start {target_ip}
+ end {target_ip}
+ interface {interface}
+ max_leases 1
+ lease_file {leases_path}
+ pidfile {pidfile_path}
+ boot_file pxelinux.0
+ option dns {host_ip}
+ option broadcast {broadcast_ip}
+ ''').format(**locals()))
+ with open(stdout_path, 'w') as stdout, \
+ open(stderr_path, 'w') as stderr:
+ sp = subprocess.Popen(['udhcpd', '-f', config_path], cwd=td,
+ stdin=open(os.devnull), stdout=stdout,
+ stderr=stderr)
+ try:
+ with executor(sp.pid):
+ yield
+ finally:
+ sp.terminate()
+
+ def get_interface_ip(self, interface):
+ ip_addresses = []
+ info = cliapp.runcmd(['ip', '-o', '-f', 'inet',
+ 'addr', 'show', interface]).rstrip('\n')
+ if info:
+ tokens = collections.deque(info.split()[1:])
+ ifname = tokens.popleft()
+ while tokens:
+ tok = tokens.popleft()
+ if tok == 'inet':
+ address = tokens.popleft()
+ address, netmask = address.split('/')
+ ip_addresses.append(address)
+ elif tok == 'brd':
+ tokens.popleft() # not interested in broadcast address
+ elif tok == 'scope':
+ tokens.popleft() # not interested in scope tag
+ else:
+ continue
+ if not ip_addresses:
+ raise cliapp.AppException('Interface %s has no addresses'
+ % interface)
+ if len(ip_addresses) > 1:
+ warnings.warn('Interface %s has multiple addresses, '
+ 'using first (%s)' % (interface, ip_addresses[0]))
+ return ip_addresses[0]
+
+ def ipmi_set_target_vlan(self):
+ if any(env_var.startswith('IPMI_') for env_var in os.environ):
+ # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST and PXEBOOT_VLAN
+ default = textwrap.dedent('''\
+ ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\
+ lan set 1 vlan id "$PXEBOOT_VLAN"
+ ''')
+ else:
+ default = textwrap.dedent('''\
+ while true; do
+ echo Please set the target\\'s vlan to $PXEBOOT_VLAN, \\
+ then enter \\"vlanned\\"
+ read
+ if [ "$REPLY" = vlanned ]; then
+ break
+ fi
+ done
+ ''')
+ command = os.environ.get('PXEBOOT_SET_VLAN_COMMAND', default)
+ subprocess.check_call(['sh', '-euc', command, '-'])
+
+ def ipmi_pxe_reboot_target(self):
+ if any(env_var.startswith('IPMI_') for env_var in os.environ):
+ # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST and PXEBOOT_VLAN
+ default = textwrap.dedent('''\
+ set -- ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST"
+ "$@" chassis bootdev pxe
+ "$@" chassis power reset
+ ''')
+ else:
+ default = textwrap.dedent('''\
+ while true; do
+ echo Please reboot the target in PXE mode, then\\
+ enter \\"pxe-booted\\"
+ read
+ if [ "$REPLY" = pxe-booted ]; then
+ break
+ fi
+ done
+ ''')
+ command = os.environ.get('PXEBOOT_PXE_REBOOT_COMMAND', default)
+ subprocess.check_call(['sh', '-euc', command, '-'])
+
+ def wait_for_target_to_install(self):
+ command = os.environ.get(
+ 'PXEBOOT_WAIT_INSTALL_COMMAND',
+ textwrap.dedent('''\
+ while true; do
+ echo Please wait for the system to install, then \\
+ enter \\"installed\\"
+ read
+ if [ "$REPLY" = installed ]; then
+ break
+ fi
+ done
+ '''))
+ subprocess.check_call(['sh', '-euc', command, '-'])
+
+ def ipmi_unset_target_vlan(self):
+ if any(env_var.startswith('IPMI_') for env_var in os.environ):
+ # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST
+ default = textwrap.dedent('''\
+ ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\
+ lan set 1 vlan id off
+ ''')
+ else:
+ default = textwrap.dedent('''\
+ while true; do
+ echo Please reset the target\\'s vlan, \\
+ then enter \\"unvlanned\\"
+ read
+ if [ "$REPLY" = unvlanned ]; then
+ break
+ fi
+ done
+ ''')
+ command = os.environ.get('PXEBOOT_UNSET_VLAN_COMMAND', default)
+ subprocess.check_call(['sh', '-euc', command, '-'])
+
+ def ipmi_reboot_target(self):
+ if any(env_var.startswith('IPMI_') for env_var in os.environ):
+ # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST
+ default = textwrap.dedent('''\
+ ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\
+ chassis power reset
+ ''')
+ else:
+ default = textwrap.dedent('''\
+ while true; do
+ echo Please reboot the target, then\\
+ enter \\"rebooted\\"
+ read
+ if [ "$REPLY" = rebooted ]; then
+ break
+ fi
+ done
+ ''')
+ command = os.environ.get('PXEBOOT_REBOOT_COMMAND', default)
+ subprocess.check_call(['sh', '-euc', command, '-'])
+
+ def process_args(self, (temp_root, macaddr)):
+ interface = os.environ.get('PXEBOOT_DEPLOYER_INTERFACE', None)
+ target_interface = os.environ.get('PXEBOOT_TARGET_INTERFACE', None)
+ vlan = os.environ.get('PXEBOOT_VLAN')
+ if vlan is not None: vlan = int(vlan)
+ mode = os.environ.get('PXEBOOT_MODE')
+ if mode is None:
+ if interface:
+ if vlan is not None:
+ mode = 'spawn-vlan'
+ else:
+ if 'PXEBOOT_CONFIG_TFTP_ADDRESS' in os.environ:
+ mode = 'existing-dhcp'
+ else:
+ mode = 'spawn-novlan'
+ else:
+ mode = 'existing-server'
+ assert mode in ('spawn-vlan', 'spawn-novlan', 'existing-dhcp',
+ 'existing-server')
+ if mode == 'spawn-vlan':
+ with self.vlan(interface=interface, vlan=vlan) \
+ as (host_ip, target_ip, broadcast_ip), \
+ self.tftp_server(host_ip=host_ip, interface=interface) \
+ as (tftp_port, tftproot), \
+ self.local_pxelinux(tftproot=tftproot), \
+ self.local_kernel(rootfs=temp_root, tftproot=tftproot), \
+ self.local_nfsroot(rootfs=temp_root, target_ip=target_ip), \
+ self.local_pxeboot_config(tftproot=tftproot, macaddr=macaddr,
+ device=target_interface,
+ ip=host_ip, tftp_port=tftp_port,
+ nfsroot_dir=temp_root), \
+ self.dhcp_server(interface=interface, host_ip=host_ip,
+ target_ip=target_ip,
+ broadcast_ip=broadcast_ip):
+ self.ipmi_set_target_vlan()
+ self.ipmi_pxe_reboot_target()
+ self.wait_for_target_to_install()
+ self.ipmi_unset_target_vlan()
+ self.ipmi_reboot_target()
+ elif mode == 'spawn-novlan':
+ with self.static_ip(interface=interface) as (host_ip, target_ip,
+ broadcast_ip), \
+ self.tftp_server(host_ip=host_ip, interface=interface,
+ tftp_port=69) \
+ as (tftp_port, tftproot), \
+ self.local_pxelinux(tftproot=tftproot), \
+ self.local_kernel(rootfs=temp_root, tftproot=tftproot), \
+ self.local_nfsroot(rootfs=temp_root, target_ip=target_ip), \
+ self.local_pxeboot_config(tftproot=tftproot, macaddr=macaddr,
+ device=target_interface,
+ ip=host_ip, tftp_port=tftp_port,
+ nfsroot_dir=temp_root), \
+ self.dhcp_server(interface=interface, host_ip=host_ip,
+ target_ip=target_ip,
+ broadcast_ip=broadcast_ip):
+ self.ipmi_pxe_reboot_target()
+ self.wait_for_target_to_install()
+ self.ipmi_reboot_target()
+ elif mode == 'existing-dhcp':
+ ip = self.get_interface_ip(interface)
+ config_tftpaddr = os.environ['PXEBOOT_CONFIG_TFTP_ADDRESS']
+            with self.tftp_server(host_ip=ip, interface=interface,
+                                  tftp_port=69) \
+                as (tftp_port, tftproot), \
+                self.local_kernel(rootfs=temp_root, tftproot=tftproot), \
+                self.local_nfsroot(rootfs=temp_root, target_ip=''):
+ kernel_tftproot = 'tftp://{}:{}/'.format(ip, tftp_port)
+ rootfs_nfsroot = '{}:{}'.format(ip, temp_root)
+ with self.remote_pxeboot_config(
+ tftproot=config_tftpaddr,
+ kernel_tftproot=kernel_tftproot,
+ kernel_subpath='kernel',
+                        fdt_subpath=None,
+                        rootfs_nfsroot=rootfs_nfsroot,
+ rootfs_subpath='',
+ macaddr=macaddr):
+ self.ipmi_pxe_reboot_target()
+ self.wait_for_target_to_install()
+ self.ipmi_reboot_target()
+ elif mode == 'existing-server':
+ config_tftpaddr = os.environ[ 'PXEBOOT_CONFIG_TFTP_ADDRESS']
+ kernel_tftpaddr = os.environ.get('PXEBOOT_KERNEL_TFTP_ADDRESS',
+ config_tftpaddr)
+ url = urlparse.urlsplit(kernel_tftpaddr)
+ kernel_tftproot = os.environ.get('PXEBOOT_KERNEL_TFTP_ROOT',
+ 'tftp://%s/%s' % (url.hostname,
+ url.path))
+ rootfs_rsync = os.environ['PXEBOOT_ROOTFS_RSYNC_ADDRESS']
+ url = urlparse.urlsplit(rootfs_rsync)
+ nfsroot = os.environ.get('PXEBOOT_ROOTFS_NFSROOT',
+ '%s:%s' % (url.hostname, url.path))
+ with self.remote_kernel(rootfs=temp_root, tftp_url=kernel_tftpaddr,
+ macaddr=macaddr) as kernel_subpath, \
+ self.remote_fdt(rootfs=temp_root, tftp_url=kernel_tftpaddr,
+ macaddr=macaddr) as fdt_subpath, \
+ self.remote_nfsroot(rootfs=temp_root, rsync_url=rootfs_rsync, \
+ macaddr=macaddr) as rootfs_subpath, \
+ self.remote_pxeboot_config(tftproot=config_tftpaddr,
+ kernel_tftproot=kernel_tftproot,
+ kernel_subpath=kernel_subpath,
+ fdt_subpath=fdt_subpath,
+ rootfs_nfsroot=nfsroot,
+ rootfs_subpath=rootfs_subpath,
+ macaddr=macaddr):
+ persist = os.environ.get('PXE_INSTALLER') in ('no', 'False')
+ if not persist:
+ self.ipmi_pxe_reboot_target()
+ self.wait_for_target_to_install()
+ self.ipmi_reboot_target()
+ else:
+        raise cliapp.AppException('Invalid PXEBOOT_MODE: %s' % mode)
+
+PXEBoot().run()
diff --git a/extensions/pxeboot.write.help b/extensions/pxeboot.write.help
new file mode 100644
index 00000000..7cb78bce
--- /dev/null
+++ b/extensions/pxeboot.write.help
@@ -0,0 +1,166 @@
+help: >
+ pxeboot.write extension.
+
+
+ This write extension will serve your generated system over NFS to
+ the target system.
+
+ In all modes `location` is the mac address of the interface that
+ the target will PXE boot from. This is used so that the target will
+ load the configuration file appropriate to it.
+
+
+ # `PXEBOOT_MODE`
+
+
+ It has 4 modes, which can be specified with PXEBOOT_MODE, or inferred
+ from which parameters are passed:
+
+
+ ## spawn-vlan
+
+
+ Specify PXEBOOT_DEPLOYER_INTERFACE and PXEBOOT_VLAN to configure
+ the target to pxeboot on a vlan and spawn a dhcp, nfs and tftp
+ server. This is potentially the fastest, since it doesn't need to
+ copy data to other servers.
+
+ This will create a vlan interface for the interface specified in
+ PXEBOOT_DEPLOYER_INTERFACE and spawn a dhcp server which serves
+ pxelinux.0, a configuration file and a kernel image from itself.
+
+ The configuration file informs the target to boot with a kernel
+ command-line that uses an NFS root served from the deployment host.
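+
+  A deployment using this mode might set, for example (values are
+  hypothetical):
+
+      PXEBOOT_DEPLOYER_INTERFACE=eth1
+      PXEBOOT_VLAN=42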
+
+
+ ## spawn-novlan
+
+
+ Specify PXEBOOT_DEPLOYER_INTERFACE without PXEBOOT_VLAN to configure
+ like `spawn-vlan`, but without creating the vlan interface.
+
+ This assumes that you have exclusive access to the interface, such
+ as if you're plugged in to the device directly, or your interface
+ is vlanned by your infrastructure team.
+
+ This is required if you are serving from a VM and bridging it to the
+ correct network via macvtap. For this to work, you need to macvtap
+ bridge to a pre-vlanned interface on your host machine.
+
+
+ ## existing-dhcp
+
+
+ Specify PXEBOOT_DEPLOYER_INTERFACE and PXEBOOT_CONFIG_TFTP_ADDRESS
+ to put config on an existing tftp server, already configured by the
+ dhcp server.
+
+ This spawns a tftp server and configures the local nfs server, but
+ doesn't spawn a dhcp server. This is useful if you have already got a
+ dhcp server that serves PXE images.
+
+ PXEBOOT_CONFIG_TFTP_ADDRESS is a URL in the form `sftp://$HOST/$PATH`.
+  The configuration file is copied to `$PATH/pxelinux.cfg/` on the
+  machine identified by `$HOST`.
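+
+  For example (hypothetical values):
+
+      PXEBOOT_DEPLOYER_INTERFACE=eth0
+      PXEBOOT_CONFIG_TFTP_ADDRESS=sftp://192.168.0.1/srv/tftp_root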
+
+
+ ## existing-server
+
+
+ Specify at least PXEBOOT_CONFIG_TFTP_ADDRESS and
+ PXEBOOT_ROOTFS_RSYNC_ADDRESS to specify existing servers to copy
+ config, kernels and the rootfs to.
+
+  Configuration is copied to the tftp server in the same way as in
+  `existing-dhcp` mode.
+
+ Specify PXEBOOT_KERNEL_TFTP_ADDRESS if the tftp server that the
+ kernel must be downloaded from is different to that of the pxelinux
+ configuration file.
+
+  PXEBOOT_ROOTFS_RSYNC_ADDRESS is an rsync URL describing where to copy
+  nfsroots to, so that they will be exported by the NFS server.
+
+ Specify PXEBOOT_ROOTFS_NFSROOT if the nfsroot appears as a different
+ address from the target's perspective.
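+
+  A full set of hypothetical values for this mode might look like:
+
+      PXEBOOT_CONFIG_TFTP_ADDRESS=sftp://192.168.0.1/srv/tftp_root
+      PXEBOOT_KERNEL_TFTP_ADDRESS=sftp://192.168.0.1/srv/tftp_root
+      PXEBOOT_ROOTFS_RSYNC_ADDRESS=rsync://192.168.0.1/srv/nfsroots
+      PXEBOOT_ROOTFS_NFSROOT=192.168.0.1:/srv/nfsroots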
+
+
+ # IPMI commands
+
+
+ After the PXE boot has been set up, the target needs to be rebooted
+ in PXE mode.
+
+ If the target is IPMI enabled, you can set `IPMI_USER`, `IPMI_HOST`
+ and `IPMI_PASSWORD` to make it reboot the target into netboot mode
+ automatically.
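+
+  With those variables set, the default PXE reboot step is equivalent
+  to running:
+
+      ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \
+          chassis bootdev pxe
+      ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \
+          chassis power reset
+
+  (ipmitool's `-E` option takes the password from the IPMI_PASSWORD
+  environment variable.)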
+
+ If they are not specified, then instructions will be displayed, and
+ `pxeboot.write` will wait for you to finish.
+
+ If there are command-line automation tools for rebooting the target
+ in netboot mode, then appropriate commands can be defined in the
+ following variables.
+
+
+ ## PXEBOOT_PXE_REBOOT_COMMAND
+
+
+ This command will be used to reboot the target device with its boot
+ device set to PXE boot.
+
+
+ ## PXEBOOT_REBOOT_COMMAND
+
+
+ This command will be used to reboot the target device in its default
+ boot mode.
+
+
+ ## PXEBOOT_WAIT_INSTALL_COMMAND
+
+
+ If it is possible for the target to notify you that it has finished
+ installing, you can put a command in here to wait for the event.
+
+
+ # Misc
+
+
+ ## KERNEL_ARGS
+
+
+ Additional kernel command line options. Note that the following
+ options
+
+      root=/dev/nfs ip=dhcp nfsroot=$NFSROOT
+
+ are implicitly added by the extension.
+
+
+ ## DTB_PATH
+
+
+ Location in the deployed root filesystem of the Flattened Device
+ Tree blob (FDT) to use.
+
+
+ ## PXE_INSTALLER
+
+
+ If set to `no`, `False` or any other YAML value for false, the
+ remotely installed rootfs, kernel, bootloader config file and
+  device tree blob (if specified) will not be removed after the
+  deployment finishes. This variable is only meaningful in
+  `existing-server` mode.
+
+
+ ## PXEBOOT_TARGET_INTERFACE
+
+ Name of the interface of the target to pxeboot from. Some targets
+ with more than one interface try to get the rootfs from a different
+ interface than the interface from where the pxeboot server is
+  reachable. When this variable is set, the kernel arguments are filled
+  in to name that device explicitly.
+
+ Note that the name of this interface is the kernel's default name,
+  usually ethX, and is non-deterministic.
diff --git a/extensions/rawdisk.check b/extensions/rawdisk.check
new file mode 100755
index 00000000..9be0ce91
--- /dev/null
+++ b/extensions/rawdisk.check
@@ -0,0 +1,53 @@
+#!/usr/bin/python
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''Preparatory checks for Morph 'rawdisk' write extension'''
+
+import cliapp
+
+import morphlib.writeexts
+
+import os
+
+
+class RawdiskCheckExtension(morphlib.writeexts.WriteExtension):
+ def process_args(self, args):
+ if len(args) != 1:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ self.require_btrfs_in_deployment_host_kernel()
+
+ location = args[0]
+ upgrade = self.get_environment_boolean('UPGRADE')
+ if upgrade:
+ if not self.is_device(location):
+ if not os.path.isfile(location):
+ raise cliapp.AppException(
+ 'Cannot upgrade %s: it is not an existing disk image' %
+ location)
+
+ version_label = os.environ.get('VERSION_LABEL')
+ if version_label is None:
+ raise cliapp.AppException(
+ 'VERSION_LABEL was not given. It is required when '
+ 'upgrading an existing system.')
+ else:
+ if not self.is_device(location):
+ if os.path.exists(location):
+ raise cliapp.AppException(
+ 'Target %s already exists. Use `morph upgrade` if you '
+ 'want to update an existing image.' % location)
+
+RawdiskCheckExtension().run()
diff --git a/extensions/rawdisk.write b/extensions/rawdisk.write
new file mode 100755
index 00000000..6f2d45ba
--- /dev/null
+++ b/extensions/rawdisk.write
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# Copyright (C) 2012-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''A Morph deployment write extension for raw disk images.'''
+
+
+import cliapp
+import os
+import sys
+import time
+import tempfile
+
+import morphlib.writeexts
+
+
+class RawDiskWriteExtension(morphlib.writeexts.WriteExtension):
+
+ '''See rawdisk.write.help for documentation'''
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ temp_root, location = args
+ upgrade = self.get_environment_boolean('UPGRADE')
+
+ if upgrade:
+ self.upgrade_local_system(location, temp_root)
+ else:
+ try:
+ if not self.is_device(location):
+ with self.created_disk_image(location):
+ self.format_btrfs(location)
+ self.create_system(temp_root, location)
+ self.status(msg='Disk image has been created at %s' %
+ location)
+ else:
+ self.format_btrfs(location)
+ self.create_system(temp_root, location)
+ self.status(msg='System deployed to %s' % location)
+ except Exception:
+ self.status(msg='Failure to deploy system to %s' %
+ location)
+ raise
+
+ def upgrade_local_system(self, raw_disk, temp_root):
+ self.complete_fstab_for_btrfs_layout(temp_root)
+
+ with self.mount(raw_disk) as mp:
+ version_label = self.get_version_label(mp)
+ self.status(msg='Updating image to a new version with label %s' %
+ version_label)
+
+ version_root = os.path.join(mp, 'systems', version_label)
+ os.mkdir(version_root)
+
+ old_orig = os.path.join(mp, 'systems', 'default', 'orig')
+ new_orig = os.path.join(version_root, 'orig')
+ cliapp.runcmd(
+ ['btrfs', 'subvolume', 'snapshot', old_orig, new_orig])
+
+ cliapp.runcmd(
+ ['rsync', '-a', '--checksum', '--numeric-ids', '--delete',
+ temp_root + os.path.sep, new_orig])
+
+ self.create_run(version_root)
+
+ default_path = os.path.join(mp, 'systems', 'default')
+ if os.path.exists(default_path):
+ os.remove(default_path)
+ else:
+            # we are upgrading an old system that does
+            # not have an updated extlinux config file
+ if self.bootloader_config_is_wanted():
+ self.generate_bootloader_config(mp)
+ self.install_bootloader(mp)
+ os.symlink(version_label, default_path)
+
+ if self.bootloader_config_is_wanted():
+ self.install_kernel(version_root, temp_root)
+
+ def get_version_label(self, mp):
+ version_label = os.environ.get('VERSION_LABEL')
+
+ if version_label is None:
+ raise cliapp.AppException('VERSION_LABEL was not given')
+
+ if os.path.exists(os.path.join(mp, 'systems', version_label)):
+ raise cliapp.AppException('VERSION_LABEL %s already exists'
+ % version_label)
+
+ return version_label
+
+
+RawDiskWriteExtension().run()
diff --git a/extensions/rawdisk.write.help b/extensions/rawdisk.write.help
new file mode 100644
index 00000000..52ed73fb
--- /dev/null
+++ b/extensions/rawdisk.write.help
@@ -0,0 +1,82 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+ Write a system produced by Morph to a physical disk, or to a file that can
+ be used as a virtual disk. The target will be formatted as a single Btrfs
+ partition, with the system image written to a subvolume in /systems, and
+ other subvolumes created for /home, /opt, /root, /srv and /var.
+
+ When written to a physical drive, the drive can be used as the boot device
+ for a 'real' machine.
+
+ When written to a file, the file can be used independently of `morph` to
+ create virtual machines with KVM / libvirt, OpenStack or, after converting
+ it to VDI format, VirtualBox.
+
+ `morph deploy` will fail if the file specified by `location` already
+ exists.
+
+ If used in `morph upgrade`, the rootfs produced by 'morph build' is added
+ to the existing raw disk image or device as an additional btrfs sub-volume.
+ `morph upgrade` will fail if the file specified by `location` does not
+ exist, or is not a Baserock raw disk image. (Most users are unlikely to
+ need or use this functionality: it is useful mainly for developers working
+ on the Baserock tools.)
+
+ Parameters:
+
+ * location: the pathname of the disk image to be created/upgraded, or the
+ path to the physical device.
+
+ * VERSION_LABEL=label - should contain only alpha-numeric
+ characters and the '-' (hyphen) character. Mandatory if being used with
+    `morph upgrade`.
+
+ * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to
+ tell Linux to use, rather than booting the rootfs directly.
+
+ * DTB_PATH=path: **(MANDATORY)** for systems that require a device tree
+ binary - Give the full path (without a leading /) to the location of the
+    DTB in the built system image. The deployment will fail if `path` does
+ not exist.
+
+ * BOOTLOADER_INSTALL=value: the bootloader to be installed
+ **(MANDATORY)** for non-x86 systems
+
+ allowed values =
+ - 'extlinux' (default) - the extlinux bootloader will
+ be installed
+ - 'none' - no bootloader will be installed by `morph deploy`. A
+ bootloader must be installed manually. This value must be used when
+ deploying non-x86 systems such as ARM.
+
+ * BOOTLOADER_CONFIG_FORMAT=value: the bootloader format to be used.
+ If not specified for x86-32 and x86-64 systems, 'extlinux' will be used
+
+ allowed values =
+ - 'extlinux'
+
+ * KERNEL_ARGS=args: optional additional kernel command-line parameters to
+ be appended to the default set. The default set is:
+
+ 'rw init=/sbin/init rootfstype=btrfs \
+ rootflags=subvol=systems/default/run \
+ root=[name or UUID of root filesystem]'
+
+ (See https://www.kernel.org/doc/Documentation/kernel-parameters.txt)
+
+ (See `morph help deploy` for details of how to pass parameters to write
+ extensions)
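+
+  As an illustration, an upgrade deployed with VERSION_LABEL=2015-06-02
+  ends up as a new btrfs subvolume under /systems/2015-06-02 in the
+  target image, and the 'default' symlink in /systems is repointed at it
+  (see `extensions/rawdisk.write` for the exact steps).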
diff --git a/extensions/sdk.write b/extensions/sdk.write
new file mode 100755
index 00000000..8d3d2a63
--- /dev/null
+++ b/extensions/sdk.write
@@ -0,0 +1,284 @@
+#!/bin/sh
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# =*= License: GPL-2 =*=
+
+set -eu
+
+die(){
+ echo "$@" >&2
+ exit 1
+}
+
+shellescape(){
+ echo "'$(echo "$1" | sed -e "s/'/'\\''/g")'"
+}
+
+########################## END OF COMMON HEADER ###############################
+#
+# The above lines, as well as being part of this script, are copied into the
+# self-installing SDK blob's header script, as a means of re-using content.
+#
+
+help(){
+ cat <<EOF
+sdk.write: Write extension for making an SDK installer.
+
+Description:
+ This is a write extension for producing a self-installing SDK blob
+ from a configured system.
+
+ It generates a shell script header and appends the rootfs as a tarball,
+    which the header later extracts, performing various configuration
+    steps to make it usable as a relocatable toolchain.
+
+ This is similar to what the shar and makeself programs do, but we
+ need custom setup, so shar isn't appropriate, and makeself's api is
+ insufficiently flexible for our requirements.
+
+ The toolchain relocation is handled by sedding every text file in the
+ SDK directory, and using the patchelf from inside the SDK to change
+ every ELF binary in the toolchain to use the linker and libraries from
+ inside the SDK.
+
+ The ELF patching is required so that the SDK can work independently
+ of the versions of libraries installed on the host system.
+
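+The generated blob is later run wherever the SDK is to be installed, as
+
+    ./<generated-script> TOOLCHAIN_PATH
+
+which extracts the SDK into TOOLCHAIN_PATH and performs the relocation
+described above (see the header script's usage message below).
+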
+Location: Path to create the script at
+
+ENV VARS:
+ PREFIX (optional) The prefix the toolchain is built with
+ defaults to /usr
+ TARGET (mandatory) The gnu triplet the toolchain was built with
+EOF
+}
+
+ROOTDIR="$1"
+OUTPUT_SCRIPT="$2"
+PREFIX=${PREFIX-/usr}
+
+find_patchelf(){
+ # Look for patchelf in the usual places
+ for binpath in /bin "$PREFIX/bin"; do
+ if [ -x "$ROOTDIR$binpath/patchelf" ]; then
+ echo "$binpath/patchelf"
+ return
+ fi
+ done
+ die "patchelf not found in rootfs"
+}
+
+read_elf_interpreter(){
+ # Use readelf and sed to find the interpreter a binary uses this is
+ # required since we can't yet guarantee that the deploying system
+ # contains patchelf
+ readelf --wide --program-headers "$1" |
+ sed -nr -f /proc/self/fd/3 3<<'EOF'
+/\s+INTERP/{
+ n # linker is on line after INTERP line
+ s/^\s*\[Requesting program interpreter: (.*)]$/\1/
+ p # in -n mode, so need to print our text
+}
+EOF
+}
+
+find_lib_paths(){
+ local found_first=false
+ for libpath in "$PREFIX/lib32" "$PREFIX/lib64" "$PREFIX/lib" \
+ /lib32 /lib64 /lib; do
+ if [ -e "$ROOTDIR$libpath" ]; then
+ if "$found_first"; then
+ printf ":%s" "$libpath"
+ else
+ printf "%s" "$libpath"
+ found_first=true
+ fi
+ fi
+ done
+}
+
+# Create script with common header
+header_end=$(grep -En -m1 -e '^#+ END OF COMMON HEADER #+$' "$0" | cut -d: -f1)
+head -n "$header_end" "$0" | install -m 755 -D /dev/stdin "$OUTPUT_SCRIPT"
+
+# Determine any config
+PATCHELF="$(find_patchelf)"
+RTLD="$(read_elf_interpreter "$ROOTDIR$PATCHELF")"
+LIB_PATH="${LIB_PATH-$(find_lib_paths)}"
+
+# Append deploy-time config to header
+cat >>"$OUTPUT_SCRIPT" <<EOF
+#################### START OF DEPLOY TIME CONFIGURATION #######################
+
+TARGET=$(shellescape "$TARGET")
+PREFIX=$(shellescape "$PREFIX")
+PATCHELF=$(shellescape "$PATCHELF")
+RTLD=$(shellescape "$RTLD")
+LIB_PATH=$(shellescape "$LIB_PATH")
+
+##################### END OF DEPLOY TIME CONFIGURATION ########################
+EOF
+
+# Append deployment script
+cat >>"$OUTPUT_SCRIPT" <<'EOF'
+########################### START OF HEADER SCRIPT ############################
+
+usage(){
+ cat <<USAGE
+usage: $0 TOOLCHAIN_PATH
+USAGE
+}
+
+if [ "$#" != 1 ]; then
+ echo TOOLCHAIN_PATH not given >&2
+ usage >&2
+ exit 1
+fi
+
+TOOLCHAIN_PATH="$(readlink -f "$1")"
+
+sedescape(){
+ # Escape the passed in string so it can be safely interpolated into
+ # a sed expression as a literal value.
+ echo "$1" | sed -e 's/[\/&]/\\&/g'
+}
+
+prepend_to_path_elements(){
+ # Prepend $1 to every entry in the : separated list specified as $2.
+ local prefix="$1"
+ (
+ # Split path into components
+ IFS=:
+ set -- $2
+ # Print path back out with new prefix
+ printf %s "$prefix/$1"
+ shift
+ for arg in "$@"; do
+ printf ":%s" "$prefix/$arg"
+ done
+ )
+}
+
+extract_rootfs(){
+ # Extract the bzipped tarball at the end of the script passed as $1
+ # to the path specified as $2
+ local selfextractor="$1"
+ local target="$2"
+ local script_end="$(($(\
+ grep -aEn -m1 -e '^#+ END OF HEADER SCRIPT #+$' "$selfextractor" |
+ cut -d: -f1) + 1 ))"
+ mkdir -p "$target"
+ tail -n +"$script_end" "$selfextractor" | tar -xj -C "$target" .
+}
+
+amend_text_file_paths(){
+ # Replace all instances of $3 with $4 in the directory specified by $1
+ # excluding the subdirectory $2
+ local root="$1"
+ local inner_sysroot="$2"
+ local old_prefix="$3"
+ local new_prefix="$4"
+ find "$root" \( -path "$inner_sysroot" -prune \) -o -type f \
+ -exec sh -c 'file "$1" | grep -q text' - {} \; \
+ -exec sed -i -e \
+ "s/$(sedescape "$old_prefix")/$(sedescape "$new_prefix")/g" {} +
+}
+
+filter_patchelf_errors(){
+ # Filter out warnings from patchelf that are acceptable
+ # The warning that it's making a file bigger is just noise
+ # The warning about not being an ELF executable just means we got a
+ # false positive from file that it was an ELF binary
+ # Failing to find .interp is because for convenience, we set the
+ # interpreter in the same command as setting the rpath, even though
+ # we give it both executables and libraries.
+ grep -v -e 'warning: working around a Linux kernel bug' \
+ -e 'not an ELF executable' \
+ -e 'cannot find section .interp'
+}
+
+patch_elves(){
+ # Set the interpreter and library paths of ELF binaries in $1,
+ # except for the $2 subdirectory, using the patchelf command in the
+ # toolchain specified as $3, so that it uses the linker specified
+ # as $4 as the interpreter, and the runtime path specified by $5.
+ #
+ # The patchelf inside the toolchain is used to ensure that it works
+ # independently of the availability of patchelf on the host.
+ #
+ # This is possible by invoking the linker directly and specifying
+ # --linker-path as the RPATH we want to set the binaries to use.
+ local root="$1"
+ local inner_sysroot="$2"
+ local patchelf="$3"
+ local linker="$4"
+ local lib_path="$5"
+ find "$root" \( -path "$inner_sysroot" -prune \) -o -type f \
+ -type f -perm +u=x \
+ -exec sh -c 'file "$1" | grep -q "ELF"' - {} \; \
+ -exec "$linker" --library-path "$lib_path" \
+ "$patchelf" --set-interpreter "$linker" \
+ --set-rpath "$lib_path" {} \; 2>&1 \
+ | filter_patchelf_errors
+}
+
+generate_environment_setup(){
+ local target="$1"
+ install -m 644 -D /dev/stdin "$target" <<ENVSETUP
+export PATH=$(shellescape "$TOOLCHAIN_PATH/usr/bin"):"\$PATH"
+export TARGET_PREFIX=$(shellescape "$TARGET"-)
+export CC=$(shellescape "$TARGET-gcc")
+export CXX=$(shellescape "$TARGET-g++")
+export CPP=$(shellescape "$TARGET-gcc -E")
+export AS=$(shellescape "$TARGET-as")
+export LD=$(shellescape "$TARGET-ld")
+export STRIP=$(shellescape "$TARGET-strip")
+export RANLIB=$(shellescape "$TARGET-ranlib")
+export OBJCOPY=$(shellescape "$TARGET-objcopy")
+export OBJDUMP=$(shellescape "$TARGET-objdump")
+export AR=$(shellescape "$TARGET-ar")
+export NM=$(shellescape "$TARGET-nm")
+ENVSETUP
+}
+
+SYSROOT="$TOOLCHAIN_PATH$PREFIX/$TARGET/sys-root"
+PATCHELF="$TOOLCHAIN_PATH$PATCHELF"
+RTLD="$TOOLCHAIN_PATH$RTLD"
+OLD_PREFIX="$PREFIX"
+NEW_PREFIX="$TOOLCHAIN_PATH/$PREFIX"
+RPATH="$(prepend_to_path_elements "$TOOLCHAIN_PATH" "$LIB_PATH")"
+ENV_SETUP_FILE="$(dirname "$TOOLCHAIN_PATH")/environment-setup-$TARGET"
+
+echo Writing environment setup script to "$ENV_SETUP_FILE"
+generate_environment_setup "$ENV_SETUP_FILE"
+
+echo Extracting rootfs
+extract_rootfs "$0" "$TOOLCHAIN_PATH"
+
+echo "Relocating prefix references of $OLD_PREFIX to $NEW_PREFIX in" \
+ "the toolchain's textual config files."
+amend_text_file_paths "$TOOLCHAIN_PATH" "$SYSROOT" "$OLD_PREFIX" "$NEW_PREFIX"
+
+echo "Patching ELF binary files' interpreter and runtime library paths" \
+ "to refer to libraries within the toolchain"
+patch_elves "$TOOLCHAIN_PATH" "$SYSROOT" "$PATCHELF" "$RTLD" "$RPATH"
+
+exit
+############################ END OF HEADER SCRIPT #############################
+EOF
+
+# Append rootfs as tarball
+tar -C "$1" -cj >>"$OUTPUT_SCRIPT" .
diff --git a/extensions/set-hostname.configure b/extensions/set-hostname.configure
new file mode 100755
index 00000000..4b2424d8
--- /dev/null
+++ b/extensions/set-hostname.configure
@@ -0,0 +1,26 @@
+#!/bin/sh
+# Copyright (C) 2013,2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+# Set hostname on system from HOSTNAME.
+
+
+set -e
+
+if [ -n "$HOSTNAME" ]
+then
+ echo "$HOSTNAME" > "$1/etc/hostname"
+fi
+
diff --git a/extensions/simple-network.configure b/extensions/simple-network.configure
new file mode 100755
index 00000000..4a70f311
--- /dev/null
+++ b/extensions/simple-network.configure
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+# Copyright (C) 2013,2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''A Morph deployment configuration extension to handle network configuration
+
+This extension prepares /etc/network/interfaces and networkd .network files
+in /etc/systemd/network/ with the interfaces specified during deployment.
+
+If no network configuration is provided, eth0 will be configured for DHCP
+with the hostname of the system in the case of /etc/network/interfaces.
+In the case of networkd, any interface whose name starts with 'e' will be
+configured for DHCP.
+'''
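+
+# For example, NETWORK_CONFIG might contain (addresses are hypothetical):
+#
+#   lo:loopback;eth0:static,address=10.0.0.1,netmask=255.255.0.0
+#
+# See parse_network_stanza below for the full stanza syntax.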
+
+
+import os
+import sys
+import errno
+import cliapp
+
+import morphlib
+
+
+class SimpleNetworkError(morphlib.Error):
+ '''Errors associated with simple network setup'''
+ pass
+
+
+class SimpleNetworkConfigurationExtension(cliapp.Application):
+ '''Configure /etc/network/interfaces and generate networkd .network files
+
+ Reading NETWORK_CONFIG, this extension sets up /etc/network/interfaces
+ and .network files in /etc/systemd/network/.
+ '''
+
+ def process_args(self, args):
+ network_config = os.environ.get("NETWORK_CONFIG")
+
+ self.rename_networkd_chunk_file(args)
+
+ if network_config is None:
+ self.generate_default_network_config(args)
+ else:
+ self.status(msg="Processing NETWORK_CONFIG=%(nc)s",
+ nc=network_config)
+
+ stanzas = self.parse_network_stanzas(network_config)
+
+ self.generate_interfaces_file(args, stanzas)
+ self.generate_networkd_files(args, stanzas)
+
+ def rename_networkd_chunk_file(self, args):
+        """Rename the 10-dhcp.network file generated in the systemd chunk
+
+ The systemd chunk will place something in 10-dhcp.network, which will
+ have higher precedence than anything added in this extension (we
+ start at 50-*).
+
+        We should check for that file and rename it rather than remove it,
+        in case the file is being used by the user.
+
+ Until both the following happen, we should continue to rename that
+ default config file:
+
+ 1. simple-network.configure is always run when systemd is included
+        2. We've been building systems whose systemd chunk does not include
+           that default networkd config for long enough that nobody should
+           still be relying on that config file.
+ """
+ file_path = os.path.join(args[0], "etc", "systemd", "network",
+ "10-dhcp.network")
+
+ if os.path.isfile(file_path):
+ try:
+ os.rename(file_path, file_path + ".morph")
+                self.status(msg="Renaming networkd file from systemd chunk: "
+                                "%(f)s to %(f)s.morph", f=file_path)
+ except OSError:
+ pass
+
+ def generate_default_network_config(self, args):
+ """Generate default network config: DHCP in all the interfaces"""
+
+ default_network_config_interfaces = "lo:loopback;" \
+ "eth0:dhcp,hostname=$(hostname)"
+ default_network_config_networkd = "e*:dhcp"
+
+ stanzas_interfaces = self.parse_network_stanzas(
+ default_network_config_interfaces)
+ stanzas_networkd = self.parse_network_stanzas(
+ default_network_config_networkd)
+
+ self.generate_interfaces_file(args, stanzas_interfaces)
+ self.generate_networkd_files(args, stanzas_networkd)
+
+ def generate_interfaces_file(self, args, stanzas):
+ """Generate /etc/network/interfaces file"""
+
+ iface_file = self.generate_iface_file(stanzas)
+
+ directory_path = os.path.join(args[0], "etc", "network")
+ self.make_sure_path_exists(directory_path)
+ file_path = os.path.join(directory_path, "interfaces")
+ with open(file_path, "w") as f:
+ f.write(iface_file)
+
+ def generate_iface_file(self, stanzas):
+ """Generate an interfaces file from the provided stanzas.
+
+ The interfaces will be sorted by name, with loopback sorted first.
+ """
+
+ def cmp_iface_names(a, b):
+ a = a['name']
+ b = b['name']
+ if a == "lo":
+ return -1
+ elif b == "lo":
+ return 1
+ else:
+ return cmp(a,b)
+
+ return "\n".join(self.generate_iface_stanza(stanza)
+ for stanza in sorted(stanzas, cmp=cmp_iface_names))
+
+ def generate_iface_stanza(self, stanza):
+ """Generate an interfaces stanza from the provided data."""
+
+ name = stanza['name']
+ itype = stanza['type']
+ lines = ["auto %s" % name, "iface %s inet %s" % (name, itype)]
+ lines += [" %s %s" % elem for elem in stanza['args'].items()]
+ lines += [""]
+ return "\n".join(lines)
+
+ def generate_networkd_files(self, args, stanzas):
+ """Generate .network files"""
+
+ for i, stanza in enumerate(stanzas, 50):
+ iface_file = self.generate_networkd_file(stanza)
+
+ if iface_file is None:
+ continue
+
+ directory_path = os.path.join(args[0], "etc", "systemd", "network")
+ self.make_sure_path_exists(directory_path)
+ file_path = os.path.join(directory_path,
+ "%s-%s.network" % (i, stanza['name']))
+
+ with open(file_path, "w") as f:
+ f.write(iface_file)
+
+ def generate_networkd_file(self, stanza):
+ """Generate an .network file from the provided data."""
+
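+        # For an eth0:dhcp stanza this returns:
+        #
+        #   [Match]
+        #   Name=eth0
+        #
+        #   [Network]
+        #   DHCP=yes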
+ name = stanza['name']
+ itype = stanza['type']
+ pairs = stanza['args'].items()
+
+ if itype == "loopback":
+ return
+
+ lines = ["[Match]"]
+ lines += ["Name=%s\n" % name]
+ lines += ["[Network]"]
+ if itype == "dhcp":
+ lines += ["DHCP=yes"]
+ else:
+ lines += self.generate_networkd_entries(pairs)
+
+ return "\n".join(lines)
+
+ def generate_networkd_entries(self, pairs):
+ """Generate networkd configuration entries with the other parameters"""
+
+ address = None
+ netmask = None
+ gateway = None
+ dns = None
+ lines = []
+
+ for pair in pairs:
+ if pair[0] == 'address':
+ address = pair[1]
+ elif pair[0] == 'netmask':
+ netmask = pair[1]
+ elif pair[0] == 'gateway':
+ gateway = pair[1]
+ elif pair[0] == 'dns':
+ dns = pair[1]
+
+ if address and netmask:
+            network_suffix = self.convert_net_mask_to_cidr_suffix(netmask)
+ address_line = address + '/' + str(network_suffix)
+ lines += ["Address=%s" % address_line]
+ elif address or netmask:
+ raise Exception('address and netmask must be specified together')
+
+ if gateway:
+ lines += ["Gateway=%s" % gateway]
+
+ if dns:
+ lines += ["DNS=%s" % dns]
+
+ return lines
+
+ def convert_net_mask_to_cidr_suffix(self, mask):
+ """Convert dotted decimal form of a subnet mask to CIDR suffix notation
+
+ For example: 255.255.255.0 -> 24
+ """
+ return sum(bin(int(x)).count('1') for x in mask.split('.'))
+
+ def parse_network_stanzas(self, config):
+ """Parse a network config environment variable into stanzas.
+
+ Network config stanzas are semi-colon separated.
+ """
+
+ return [self.parse_network_stanza(s) for s in config.split(";")]
+
+ def parse_network_stanza(self, stanza):
+ """Parse a network config stanza into name, type and arguments.
+
+ Each stanza is of the form name:type[,arg=value]...
+
+ For example:
+ lo:loopback
+ eth0:dhcp
+ eth1:static,address=10.0.0.1,netmask=255.255.0.0
+ """
+ elements = stanza.split(",")
+ lead = elements.pop(0).split(":")
+ if len(lead) != 2:
+ raise SimpleNetworkError("Stanza '%s' is missing its type" %
+ stanza)
+ iface = lead[0]
+ iface_type = lead[1]
+
+ if iface_type not in ['loopback', 'static', 'dhcp']:
+ raise SimpleNetworkError("Stanza '%s' has unknown interface type"
+ " '%s'" % (stanza, iface_type))
+
+ argpairs = [element.split("=", 1) for element in elements]
+ output_stanza = { "name": iface,
+ "type": iface_type,
+ "args": {} }
+ for argpair in argpairs:
+ if len(argpair) != 2:
+ raise SimpleNetworkError("Stanza '%s' has bad argument '%r'"
+ % (stanza, argpair.pop(0)))
+ if argpair[0] in output_stanza["args"]:
+ raise SimpleNetworkError("Stanza '%s' has repeated argument"
+ " %s" % (stanza, argpair[0]))
+ output_stanza["args"][argpair[0]] = argpair[1]
+
+ return output_stanza
+
+ def make_sure_path_exists(self, path):
+ try:
+ os.makedirs(path)
+ except OSError as e:
+ if e.errno == errno.EEXIST and os.path.isdir(path):
+ pass
+ else:
+ raise SimpleNetworkError("Unable to create directory '%s'"
+ % path)
+
+ def status(self, **kwargs):
+ '''Provide status output.
+
+ The ``msg`` keyword argument is the actual message,
+ the rest are values for fields in the message as interpolated
+ by %.
+
+ '''
+
+ self.output.write('%s\n' % (kwargs['msg'] % kwargs))
+
+SimpleNetworkConfigurationExtension().run()
diff --git a/extensions/ssh-rsync.check b/extensions/ssh-rsync.check
new file mode 100755
index 00000000..c3bdfd29
--- /dev/null
+++ b/extensions/ssh-rsync.check
@@ -0,0 +1,64 @@
+#!/usr/bin/python
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''Preparatory checks for Morph 'ssh-rsync' write extension'''
+
+import cliapp
+
+import os
+
+import morphlib.writeexts
+
+class SshRsyncCheckExtension(morphlib.writeexts.WriteExtension):
+ def process_args(self, args):
+ if len(args) != 1:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ upgrade = self.get_environment_boolean('UPGRADE')
+ if not upgrade:
+ raise cliapp.AppException(
+ 'The ssh-rsync write is for upgrading existing remote '
+ 'Baserock machines. It cannot be used for an initial '
+ 'deployment.')
+
+ if os.environ.get('VERSION_LABEL', '') == '':
+ raise cliapp.AppException(
+ 'A VERSION_LABEL must be set when deploying an upgrade.')
+
+ location = args[0]
+ self.check_ssh_connectivity(location)
+ self.check_is_baserock_system(location)
+
+        # The new system that is being deployed as an upgrade must contain
+ # baserock-system-config-sync and system-version-manager. However, the
+ # old system simply needs to have SSH and rsync.
+ self.check_command_exists(location, 'rsync')
+
+ def check_is_baserock_system(self, location):
+ output = cliapp.ssh_runcmd(location, ['sh', '-c',
+ 'test -d /baserock || echo -n dirnotfound'])
+ if output == 'dirnotfound':
+ raise cliapp.AppException('%s is not a baserock system'
+ % location)
+
+ def check_command_exists(self, location, command):
+ test = 'type %s > /dev/null 2>&1 || echo -n cmdnotfound' % command
+ output = cliapp.ssh_runcmd(location, ['sh', '-c', test])
+ if output == 'cmdnotfound':
+ raise cliapp.AppException(
+ "%s does not have %s" % (location, command))
+
+
+SshRsyncCheckExtension().run()
diff --git a/extensions/ssh-rsync.write b/extensions/ssh-rsync.write
new file mode 100755
index 00000000..6d596500
--- /dev/null
+++ b/extensions/ssh-rsync.write
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# Copyright (C) 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''A Morph deployment write extension for upgrading systems over ssh.'''
+
+
+import contextlib
+import cliapp
+import os
+import sys
+import time
+import tempfile
+
+import morphlib.writeexts
+
+
+def ssh_runcmd_ignore_failure(location, command, **kwargs):
+ try:
+ return cliapp.ssh_runcmd(location, command, **kwargs)
+ except cliapp.AppException:
+ pass
+
+
+class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension):
+
+ '''See ssh-rsync.write.help for documentation'''
+
+
+ def find_root_disk(self, location):
+ '''Read /proc/mounts on location to find which device contains "/"'''
+
+ self.status(msg='Finding device that contains "/"')
+ contents = cliapp.ssh_runcmd(location, ['cat', '/proc/mounts'])
+ for line in contents.splitlines():
+ line_words = line.split()
+ if (line_words[1] == '/' and line_words[0] != 'rootfs'):
+ return line_words[0]
+
+ @contextlib.contextmanager
+ def _remote_mount_point(self, location):
+ self.status(msg='Creating remote mount point')
+ remote_mnt = cliapp.ssh_runcmd(location, ['mktemp', '-d']).strip()
+ try:
+ yield remote_mnt
+ finally:
+ self.status(msg='Removing remote mount point')
+ cliapp.ssh_runcmd(location, ['rmdir', remote_mnt])
+
+ @contextlib.contextmanager
+ def _remote_mount(self, location, root_disk, mountpoint):
+ self.status(msg='Mounting root disk')
+ cliapp.ssh_runcmd(location, ['mount', root_disk, mountpoint])
+ try:
+ yield
+ finally:
+ self.status(msg='Unmounting root disk')
+ cliapp.ssh_runcmd(location, ['umount', mountpoint])
+
+ @contextlib.contextmanager
+ def _created_version_root(self, location, remote_mnt, version_label):
+ version_root = os.path.join(remote_mnt, 'systems', version_label)
+ self.status(msg='Creating %(root)s', root=version_root)
+ cliapp.ssh_runcmd(location, ['mkdir', version_root])
+ try:
+ yield version_root
+ except BaseException as e:
+ # catch all, we always want to clean up
+ self.status(msg='Cleaning up %(root)s', root=version_root)
+ ssh_runcmd_ignore_failure(location, ['rmdir', version_root])
+ raise
+
+ def get_old_orig(self, location, remote_mnt):
+ '''Identify which subvolume to snapshot from'''
+
+ # rawdisk upgrades use 'default'
+ return os.path.join(remote_mnt, 'systems', 'default', 'orig')
+
+ @contextlib.contextmanager
+ def _created_orig_subvolume(self, location, remote_mnt, version_root):
+ self.status(msg='Creating "orig" subvolume')
+ old_orig = self.get_old_orig(location, remote_mnt)
+ new_orig = os.path.join(version_root, 'orig')
+ cliapp.ssh_runcmd(location, ['btrfs', 'subvolume', 'snapshot',
+ old_orig, new_orig])
+ try:
+ yield new_orig
+ except BaseException as e:
+ ssh_runcmd_ignore_failure(
+ location, ['btrfs', 'subvolume', 'delete', new_orig])
+ raise
+
+ def populate_remote_orig(self, location, new_orig, temp_root):
+ '''Populate the subvolume version_root/orig on location'''
+
+ self.status(msg='Populating "orig" subvolume')
+ cliapp.runcmd(['rsync', '-as', '--checksum', '--numeric-ids',
+ '--delete', temp_root + os.path.sep,
+ '%s:%s' % (location, new_orig)])
+
+ @contextlib.contextmanager
+ def _deployed_version(self, location, version_label,
+ system_config_sync, system_version_manager):
+ self.status(msg='Calling system-version-manager to deploy upgrade')
+ deployment = os.path.join('/systems', version_label, 'orig')
+ cliapp.ssh_runcmd(location,
+ ['env', 'BASEROCK_SYSTEM_CONFIG_SYNC='+system_config_sync,
+ system_version_manager, 'deploy', deployment])
+ try:
+ yield deployment
+ except BaseException as e:
+ self.status(msg='Cleaning up failed version installation')
+ cliapp.ssh_runcmd(location,
+ [system_version_manager, 'remove', version_label])
+ raise
+
+ def upgrade_remote_system(self, location, temp_root):
+ root_disk = self.find_root_disk(location)
+ uuid = cliapp.ssh_runcmd(location, ['blkid', '-s', 'UUID', '-o',
+ 'value', root_disk]).strip()
+
+ self.complete_fstab_for_btrfs_layout(temp_root, uuid)
+
+ version_label = os.environ['VERSION_LABEL']
+ autostart = self.get_environment_boolean('AUTOSTART')
+
+ with self._remote_mount_point(location) as remote_mnt, \
+ self._remote_mount(location, root_disk, remote_mnt), \
+ self._created_version_root(location, remote_mnt,
+ version_label) as version_root, \
+ self._created_orig_subvolume(location, remote_mnt,
+ version_root) as orig:
+ self.populate_remote_orig(location, orig, temp_root)
+ system_root = os.path.join(remote_mnt, 'systems',
+ version_label, 'orig')
+ config_sync = os.path.join(system_root, 'usr', 'bin',
+ 'baserock-system-config-sync')
+ version_manager = os.path.join(system_root, 'usr', 'bin',
+ 'system-version-manager')
+ with self._deployed_version(location, version_label,
+ config_sync, version_manager):
+ self.status(msg='Setting %(v)s as the new default system',
+ v=version_label)
+ cliapp.ssh_runcmd(location, [version_manager,
+ 'set-default', version_label])
+
+ if autostart:
+ self.status(msg="Rebooting into new system ...")
+ ssh_runcmd_ignore_failure(location, ['reboot'])
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ temp_root, location = args
+
+ self.upgrade_remote_system(location, temp_root)
+
+
+SshRsyncWriteExtension().run()
diff --git a/extensions/ssh-rsync.write.help b/extensions/ssh-rsync.write.help
new file mode 100644
index 00000000..f3f79ed5
--- /dev/null
+++ b/extensions/ssh-rsync.write.help
@@ -0,0 +1,50 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+ Upgrade a Baserock system which is already deployed:
+ - as a KVM/LibVirt, OpenStack or vbox-ssh virtual machine;
+ - on a Jetson board.
+
+ Copies a binary delta over to the target system and arranges for it
+ to be bootable.
+
+ The recommended way to use this extension is by calling `morph upgrade`.
+ Using `morph deploy --upgrade` is deprecated.
+
+ The upgrade will fail if:
+ - no VM is deployed and running at `location`;
+ - the target system is not a Baserock system;
+ - the target's filesystem and its layout are not compatible with that
+      created by `morph deploy`.
+
+ See also the 'Upgrading a Baserock installation' section of the 'Using
+    Baserock' page at wiki.baserock.org
+ http://wiki.baserock.org/devel-with/#index8h2
+
+ Parameters:
+
+ * location: the 'user@hostname' string that will be used by ssh and rsync.
+ 'user' will always be `root` and `hostname` the hostname or address of
+ the system being upgraded.
+
+ * VERSION_LABEL=label - **(MANDATORY)** should contain only alpha-numeric
+ characters and the '-' (hyphen) character.
+
+    * AUTOSTART=<VALUE> - boolean. If it is set, the VM will be started when
+ it has been deployed.
+
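+    For example, an upgrade might be configured with values along these
+    lines (the address and label below are purely illustrative):
+
+        location: root@192.168.122.10
+        VERSION_LABEL: factory-2015-06-02
+        AUTOSTART: no
+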
+ (See `morph help deploy` for details of how to pass parameters to write
+ extensions)
diff --git a/extensions/sshkeys.configure b/extensions/sshkeys.configure
new file mode 100755
index 00000000..7a5a8379
--- /dev/null
+++ b/extensions/sshkeys.configure
@@ -0,0 +1,25 @@
+#!/bin/sh
+#
+# Copyright 2014 Codethink Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+if [ "$SSHKEYS" ]
+then
+ install -d -m 700 "$1/root/.ssh"
+    echo "Adding keys from $SSHKEYS to authorized_keys file"
+ cat $SSHKEYS >> "$1/root/.ssh/authorized_keys"
+fi
diff --git a/extensions/strip-gplv3.configure b/extensions/strip-gplv3.configure
new file mode 100755
index 00000000..c08061ad
--- /dev/null
+++ b/extensions/strip-gplv3.configure
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+# Copyright (C) 2013 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+''' A Morph configuration extension for removing gplv3 chunks from a system
+
+Using a hard-coded list of chunks, it will read the system's /baserock metadata
+to find the files created by that chunk, then remove them.
+
+'''
+
+import cliapp
+import re
+import os
+import json
+
+class StripGPLv3ConfigureExtension(cliapp.Application):
+ gplv3_chunks = [
+ ['autoconf', ''],
+ ['automake', ''],
+ ['bash', ''],
+ ['binutils', ''],
+ ['bison', ''],
+ ['ccache', ''],
+ ['cmake', ''],
+ ['flex', ''],
+ ['gawk', ''],
+ ['gcc', r'^.*lib.*\.so(\.\d+)*$'],
+ ['gdbm', ''],
+ ['gettext', ''],
+ ['gperf', ''],
+ ['groff', ''],
+ ['libtool', r'^.*lib.*\.so(\.\d+)*$'],
+ ['m4', ''],
+ ['make', ''],
+ ['nano', ''],
+ ['patch', ''],
+ ['rsync', ''],
+ ['texinfo-tarball', ''],
+ ]
+
+ def process_args(self, args):
+ target_root = args[0]
+ meta_dir = os.path.join(target_root, 'baserock')
+
+ for chunk in self.gplv3_chunks:
+ regex = os.path.join(meta_dir, "%s-[^-]\+\.meta" % chunk[0])
+ artifacts = self.runcmd(['find', meta_dir, '-regex', regex])
+
+ for artifact in artifacts.split():
+ self.remove_chunk(target_root, artifact, chunk[1])
+
+ os.symlink(os.path.join(os.sep, 'bin', 'busybox'),
+ os.path.join(target_root, 'usr', 'bin', 'awk'))
+
+ def remove_chunk(self, target_root, chunk, pattern):
+ chunk_meta_path = os.path.join(target_root, 'baserock', chunk)
+
+ with open(chunk_meta_path, 'r') as f:
+ chunk_meta_data = json.load(f)
+
+        if 'contents' not in chunk_meta_data:
+            raise cliapp.AppException(
+                'Chunk %s does not have a "contents" list' % chunk)
+ updated_contents = []
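+        # Entries that match the chunk's pattern (e.g. gcc's runtime shared
+        # libraries) are kept; everything else recorded for this chunk is
+        # removed from the target system.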
+ for content_entry in reversed(chunk_meta_data['contents']):
+ pat = re.compile(pattern)
+ if len(pattern) == 0 or not pat.match(content_entry):
+ self.remove_content_entry(target_root, content_entry)
+ else:
+ updated_contents.append(content_entry)
+
+ def remove_content_entry(self, target_root, content_entry):
+ entry_path = os.path.join(target_root, './' + content_entry)
+ if not entry_path.startswith(target_root):
+ raise cliapp.AppException('%s is not in %s'
+ % (entry_path, target_root))
+ if os.path.exists(entry_path):
+ if os.path.islink(entry_path):
+ os.unlink(entry_path)
+ elif os.path.isfile(entry_path):
+ os.remove(entry_path)
+ elif os.path.isdir(entry_path):
+ if not os.listdir(entry_path):
+ os.rmdir(entry_path)
+ else:
+ raise cliapp.AppException('%s is not a link, file or directory'
+ % entry_path)
+StripGPLv3ConfigureExtension().run()
diff --git a/extensions/swift-build-rings.yml b/extensions/swift-build-rings.yml
new file mode 100644
index 00000000..1ffe9c37
--- /dev/null
+++ b/extensions/swift-build-rings.yml
@@ -0,0 +1,34 @@
+---
+- hosts: localhost
+ vars:
+ - rings:
+ - { name: account, port: 6002 }
+ - { name: container, port: 6001 }
+ - { name: object, port: 6000 }
+ remote_user: root
+ tasks:
+ - file: path={{ ansible_env.ROOT }}/etc/swift owner=root group=root state=directory
+
+ - name: Create ring
+ shell: swift-ring-builder {{ item.name }}.builder create {{ ansible_env.SWIFT_PART_POWER }}
+ {{ ansible_env.SWIFT_REPLICAS }} {{ ansible_env.SWIFT_MIN_PART_HOURS }}
+ with_items: rings
+
+ - name: Add each storage node to the ring
+ shell: swift-ring-builder {{ item[0].name }}.builder
+ add r1z1-{{ item[1].ip }}:{{ item[0].port }}/{{ item[1].device }} {{ item[1].weight }}
+ with_nested:
+ - rings
+ - ansible_env.SWIFT_STORAGE_DEVICES
+
+ - name: Rebalance the ring
+ shell: swift-ring-builder {{ item.name }}.builder rebalance {{ ansible_env.SWIFT_REBALANCE_SEED }}
+ with_items: rings
+
+ - name: Copy ring configuration files into place
+ copy: src={{ item.name }}.ring.gz dest={{ ansible_env.ROOT }}/etc/swift
+ with_items: rings
+
+ - name: Copy ring builder files into place
+ copy: src={{ item.name }}.builder dest={{ ansible_env.ROOT }}/etc/swift
+ with_items: rings
diff --git a/extensions/swift-storage-devices-validate.py b/extensions/swift-storage-devices-validate.py
new file mode 100755
index 00000000..57ab23d0
--- /dev/null
+++ b/extensions/swift-storage-devices-validate.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright © 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# This is used by the swift-storage.configure extension
+# to validate any provided storage device specifiers
+# under SWIFT_STORAGE_DEVICES
+#
+
+
+'''
+ This is used by the swift-storage.configure extension
+ to validate any storage device specifiers specified
+ in the SWIFT_STORAGE_DEVICES environment variable
+'''
+
+from __future__ import print_function
+
+import yaml
+import sys
+
+EXAMPLE_DEVSPEC = '{device: sdb1, ip: 127.0.0.1, weight: 100}'
+REQUIRED_KEYS = ['ip', 'device', 'weight']
+
+def err(msg):
+ print(msg, file=sys.stderr)
+ sys.exit(1)
+
+if len(sys.argv) != 2:
+ err('usage: %s STRING_TO_BE_VALIDATED' % sys.argv[0])
+
+swift_storage_devices = yaml.load(sys.argv[1])
+
+if not isinstance(swift_storage_devices, list):
+ err('Expected list of device specifiers\n'
+ 'Example: [%s]' % EXAMPLE_DEVSPEC)
+
+for d in swift_storage_devices:
+ if not isinstance(d, dict):
+ err("Invalid device specifier: `%s'\n"
+ 'Device specifier must be a dictionary\n'
+ 'Example: %s' % (d, EXAMPLE_DEVSPEC))
+
+ if set(d.keys()) != set(REQUIRED_KEYS):
+ err("Invalid device specifier: `%s'\n"
+ 'Specifier should contain: %s\n'
+ 'Example: %s' % (d, str(REQUIRED_KEYS)[1:-1], EXAMPLE_DEVSPEC))
diff --git a/extensions/swift-storage.configure b/extensions/swift-storage.configure
new file mode 100644
index 00000000..391b392a
--- /dev/null
+++ b/extensions/swift-storage.configure
@@ -0,0 +1,107 @@
+#!/bin/bash
+#
+# Copyright © 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+# The ansible script needs to know where the rootfs is, so we export it here
+export ROOT="$1"
+
+validate_number() {
+ local name="$1"
+ local value="$2"
+
+ local pattern='^[0-9]+$'
+ if ! [[ $value =~ $pattern ]]
+ then
+ echo "'$name' must be a number" >&2
+ exit 1
+ fi
+}
+
+validate_non_empty() {
+ local name="$1"
+ local value="$2"
+
+ if [[ $value = None ]]
+ then
+ echo "'$name' cannot be empty" >&2
+ exit 1
+ fi
+}
+
+MANDATORY_OPTIONS="SWIFT_HASH_PATH_PREFIX \
+ SWIFT_HASH_PATH_SUFFIX \
+ SWIFT_REBALANCE_SEED \
+ SWIFT_PART_POWER \
+ SWIFT_REPLICAS \
+ SWIFT_MIN_PART_HOURS \
+ SWIFT_STORAGE_DEVICES \
+ CONTROLLER_HOST_ADDRESS \
+ MANAGEMENT_INTERFACE_IP_ADDRESS"
+
+for option in $MANDATORY_OPTIONS
+do
+ if ! [[ -v $option ]]
+ then
+ missing_option=True
+ echo "Required option $option isn't set!" >&2
+ fi
+done
+
+if [[ $missing_option = True ]]; then exit 1; fi
+
+./swift-storage-devices-validate.py "$SWIFT_STORAGE_DEVICES"
+
+# Validate SWIFT_PART_POWER, SWIFT_REPLICAS, SWIFT_MIN_PART_HOURS
+# just make sure they're numbers
+
+validate_number "SWIFT_PART_POWER" "$SWIFT_PART_POWER"
+validate_number "SWIFT_REPLICAS" "$SWIFT_REPLICAS"
+validate_number "SWIFT_MIN_PART_HOURS" "$SWIFT_MIN_PART_HOURS"
+
+# Make sure these aren't empty
+validate_non_empty "SWIFT_HASH_PATH_PREFIX" "$SWIFT_HASH_PATH_PREFIX"
+validate_non_empty "SWIFT_HASH_PATH_SUFFIX" "$SWIFT_HASH_PATH_SUFFIX"
+validate_non_empty "SWIFT_REBALANCE_SEED" "$SWIFT_REBALANCE_SEED"
+validate_non_empty "CONTROLLER_HOST_ADDRESS" "$CONTROLLER_HOST_ADDRESS"
+validate_non_empty "MANAGEMENT_INTERFACE_IP_ADDRESS" "$MANAGEMENT_INTERFACE_IP_ADDRESS"
+
+mkdir -p "$ROOT/usr/lib/systemd/system/multi-user.target.wants" # ensure this exists before we make symlinks
+
+# A swift controller needs the storage setup service
+# but does not want any of the other storage services enabled
+ln -s "/usr/lib/systemd/system/swift-storage-setup.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-storage-setup.service"
+
+SWIFT_CONTROLLER=${SWIFT_CONTROLLER:-False}
+
+if [[ $SWIFT_CONTROLLER = False ]]
+then
+ ln -s "/usr/lib/systemd/system/rsync.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/rsync.service"
+ ln -s "/usr/lib/systemd/system/swift-storage.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-storage.service"
+fi
+
+# Build swift data structures (the rings)
+/usr/bin/ansible-playbook -i hosts swift-build-rings.yml
+
+cat << EOF > "$ROOT"/usr/share/swift/swift-storage-vars.yml
+---
+MANAGEMENT_INTERFACE_IP_ADDRESS: $MANAGEMENT_INTERFACE_IP_ADDRESS
+SWIFT_HASH_PATH_PREFIX: $SWIFT_HASH_PATH_PREFIX
+SWIFT_HASH_PATH_SUFFIX: $SWIFT_HASH_PATH_SUFFIX
+EOF
diff --git a/extensions/sysroot.check b/extensions/sysroot.check
new file mode 100755
index 00000000..71b35175
--- /dev/null
+++ b/extensions/sysroot.check
@@ -0,0 +1,23 @@
+#!/bin/sh
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Preparatory checks for Morph 'sysroot' write extension
+
+set -eu
+
+if [ "$UPGRADE" = "yes" ]; then
+ echo >&2 "ERROR: Cannot upgrade a sysroot deployment"
+ exit 1
+fi
diff --git a/extensions/sysroot.write b/extensions/sysroot.write
new file mode 100755
index 00000000..46f1a780
--- /dev/null
+++ b/extensions/sysroot.write
@@ -0,0 +1,22 @@
+#!/bin/sh
+# Copyright (C) 2014,2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# A Morph write extension to deploy to another directory
+
+set -eu
+
+mkdir -p "$2"
+
+cp -a "$1"/* "$2"
diff --git a/extensions/tar.check b/extensions/tar.check
new file mode 100755
index 00000000..f2304d46
--- /dev/null
+++ b/extensions/tar.check
@@ -0,0 +1,23 @@
+#!/bin/sh
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Preparatory checks for Morph 'tar' write extension
+
+set -eu
+
+if [ "$UPGRADE" = "yes" ]; then
+ echo >&2 "ERROR: Cannot upgrade a tar file deployment."
+ exit 1
+fi
diff --git a/extensions/tar.write b/extensions/tar.write
new file mode 100755
index 00000000..01b545b4
--- /dev/null
+++ b/extensions/tar.write
@@ -0,0 +1,20 @@
+#!/bin/sh
+# Copyright (C) 2013,2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# A Morph write extension to deploy to a .tar file
+
+set -eu
+
+tar -C "$1" -cf "$2" .
diff --git a/extensions/tar.write.help b/extensions/tar.write.help
new file mode 100644
index 00000000..b45c61fa
--- /dev/null
+++ b/extensions/tar.write.help
@@ -0,0 +1,19 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+ Create a .tar file of the deployed system.
+
+ The `location` argument is a pathname to the .tar file to be
+ created.
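+
+    For example (the path below is illustrative only):
+
+        location: /src/tmp/system.tar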
diff --git a/extensions/trove.configure b/extensions/trove.configure
new file mode 100755
index 00000000..f823762c
--- /dev/null
+++ b/extensions/trove.configure
@@ -0,0 +1,148 @@
+#!/bin/sh
+#
+# Copyright (C) 2013 - 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configuration extension to fully configure
+# a Trove instance at deployment time. It uses the following variables
+# from the environment (run `morph help trove.configure` to see a description
+# of them):
+#
+# * TROVE_ID
+# * TROVE_HOSTNAME (optional, defaults to TROVE_ID)
+# * TROVE_COMPANY
+# * LORRY_SSH_KEY
+# * UPSTREAM_TROVE
+# * UPSTREAM_TROVE_PROTOCOL
+# * TROVE_ADMIN_USER
+# * TROVE_ADMIN_EMAIL
+# * TROVE_ADMIN_NAME
+# * TROVE_ADMIN_SSH_PUBKEY
+# * LORRY_CONTROLLER_MINIONS (optional, defaults to 4)
+# * TROVE_BACKUP_KEYS - a space-separated list of paths to SSH keys.
+# (optional)
+# * TROVE_GENERIC (optional)
+#
+# The configuration of a Trove is slightly tricky: part of it has to
+# be run on the configured system after it has booted. We accomplish
+# this by copying in all the relevant data to the target system
+# (in /var/lib/trove-setup), and creating a systemd unit file that
+# runs on the first boot. The first boot will be detected by the
+# existence of the /var/lib/trove-setup/needed file.
+
+set -e
+
+if [ "$TROVE_GENERIC" ]
+then
+ echo "Not configuring the trove, it will be generic"
+ exit 0
+fi
+
+
+# Check that all the variables needed are present:
+
+error_vars=false
+if test "x$TROVE_ID" = "x"; then
+ echo "ERROR: TROVE_ID needs to be defined."
+ error_vars=true
+fi
+
+if test "x$TROVE_COMPANY" = "x"; then
+ echo "ERROR: TROVE_COMPANY needs to be defined."
+ error_vars=true
+fi
+
+if test "x$TROVE_ADMIN_USER" = "x"; then
+ echo "ERROR: TROVE_ADMIN_USER needs to be defined."
+ error_vars=true
+fi
+
+if test "x$TROVE_ADMIN_NAME" = "x"; then
+ echo "ERROR: TROVE_ADMIN_NAME needs to be defined."
+ error_vars=true
+fi
+
+if test "x$TROVE_ADMIN_EMAIL" = "x"; then
+ echo "ERROR: TROVE_ADMIN_EMAIL needs to be defined."
+ error_vars=true
+fi
+
+if ! ssh-keygen -lf $LORRY_SSH_KEY > /dev/null 2>&1
+then
+    echo "ERROR: LORRY_SSH_KEY is not a valid ssh key."
+ error_vars=true
+fi
+
+if ! ssh-keygen -lf $WORKER_SSH_PUBKEY > /dev/null 2>&1
+then
+    echo "ERROR: WORKER_SSH_PUBKEY is not a valid ssh key."
+ error_vars=true
+fi
+
+if ! ssh-keygen -lf $TROVE_ADMIN_SSH_PUBKEY > /dev/null 2>&1
+then
+    echo "ERROR: TROVE_ADMIN_SSH_PUBKEY is not a valid ssh key."
+ error_vars=true
+fi
+
+if "$error_vars"; then
+ exit 1
+fi
+
+ROOT="$1"
+
+
+TROVE_DATA="$ROOT/etc/trove"
+mkdir -p "$TROVE_DATA"
+
+install -m 0600 "$LORRY_SSH_KEY" "$TROVE_DATA/lorry.key"
+install -m 0644 "${LORRY_SSH_KEY}.pub" "$TROVE_DATA/lorry.key.pub"
+install -m 0644 "$TROVE_ADMIN_SSH_PUBKEY" "$TROVE_DATA/admin.key.pub"
+install -m 0644 "$WORKER_SSH_PUBKEY" "$TROVE_DATA/worker.key.pub"
+
+
+python <<'EOF' >"$TROVE_DATA/trove.conf"
+import os, sys, yaml
+
+trove_configuration={
+ 'TROVE_ID': os.environ['TROVE_ID'],
+ 'TROVE_COMPANY': os.environ['TROVE_COMPANY'],
+ 'TROVE_ADMIN_USER': os.environ['TROVE_ADMIN_USER'],
+ 'TROVE_ADMIN_EMAIL': os.environ['TROVE_ADMIN_EMAIL'],
+ 'TROVE_ADMIN_NAME': os.environ['TROVE_ADMIN_NAME'],
+ 'LORRY_SSH_KEY': '/etc/trove/lorry.key',
+ 'LORRY_SSH_PUBKEY': '/etc/trove/lorry.key.pub',
+ 'TROVE_ADMIN_SSH_PUBKEY': '/etc/trove/admin.key.pub',
+ 'WORKER_SSH_PUBKEY': '/etc/trove/worker.key.pub',
+}
+
+
+
+optional_keys = ('MASON_ID', 'HOSTNAME', 'TROVE_HOSTNAME',
+ 'LORRY_CONTROLLER_MINIONS', 'TROVE_BACKUP_KEYS',
+ 'UPSTREAM_TROVE', 'UPSTREAM_TROVE_PROTOCOL')
+
+for key in optional_keys:
+ if key in os.environ:
+ trove_configuration[key]=os.environ[key]
+
+yaml.dump(trove_configuration, sys.stdout, default_flow_style=False)
+EOF
+
+if [ -n "$TROVE_BACKUP_KEYS" ]; then
+ mkdir -p "$TROVE_DATA/backup-keys"
+ cp -- $TROVE_BACKUP_KEYS "$TROVE_DATA/backup-keys"
+ echo "TROVE_BACKUP_KEYS: /etc/trove/backup-keys/*" >> "$TROVE_DATA/trove.conf"
+fi
diff --git a/extensions/trove.configure.help b/extensions/trove.configure.help
new file mode 100644
index 00000000..c96bdf74
--- /dev/null
+++ b/extensions/trove.configure.help
@@ -0,0 +1,126 @@
+help: |
+ This is a "morph deploy" configuration extension to fully configure
+ a Trove instance at deployment time. It uses the following
+ configuration variables:
+
+ * `TROVE_ID`
+ * `TROVE_HOSTNAME` (optional, defaults to `TROVE_ID`)
+ * `TROVE_COMPANY`
+ * `LORRY_SSH_KEY`
+ * `UPSTREAM_TROVE`
+ * `TROVE_ADMIN_USER`
+ * `TROVE_ADMIN_EMAIL`
+ * `TROVE_ADMIN_NAME`
+ * `TROVE_ADMIN_SSH_PUBKEY`
+ * `LORRY_CONTROLLER_MINIONS` (optional, defaults to 4)
+ * `TROVE_BACKUP_KEYS` - a space-separated list of paths to SSH keys.
+ (optional)
+
+ The variables are described in more detail below.
+
+ A Trove deployment needs to know the following things:
+
+ * The Trove's ID and public name.
+ * The Trove's administrator name and access details.
+ * Private and public SSH keys for the Lorry user on the Trove.
+ * Which upstream Trove it should be set to mirror upon initial deploy.
+
+ These are specified with the configuration variables described in this
+ help.
+
+ * `TROVE_GENERIC` -- boolean. If it's true the trove will be generic
+ and it won't be configured with any of the other variables listed
+ here.
+
+ * `TROVE_ID` -- the identifier of the Trove. This separates it from
+ other Troves, and allows mirroring of Troves to happen without local
+ changes getting overwritten.
+
+ The Trove ID is used in several ways. Any local repositories (those not
+ mirrored from elsewhere) get created under a prefix that is the ID.
+ Thus, the local repositories on the `git.baserock.org` Trove, whose
+ Trove ID is `baserock`, are named
+ `baserock/baserock/definitions.git` and similar. The ID is used
+ there twice: first as a prefix and then as a "project name" within
+ that prefix. There can be more projects under the prefix. For
+ example, there is a `baserock/local-config/lorries.git` repository,
+ where `local-config` is a separate project from `baserock`. Projects
+ here are a concept for the Trove's git access control language.
+
+    The Trove ID is also used as the prefix for any branch and tag names
+    created locally for repositories that are not local. Thus, in the
+    `delta/linux.git` repository, any local branches would be called
+    something like `baserock/morph`, instead of just `morph`. The
+    Trove's git access control prevents normal users from pushing
+    branches and tags that do not have the Trove ID as the prefix.
+
+ * `TROVE_HOSTNAME` -- the public name of the Trove. This is an
+ optional setting, and defaults to `TROVE_ID`. The public name is
+ typically the domain name of the server (e.g., `git.baserock.org`),
+ but can also be an IP address. This setting is used when Trove needs
+ to generate URLs that point to itself, such as the `git://` and
+ `http://` URLs for each git repository that is viewed via the web
+ interface.
+
+ Note that this is _not_ the system hostname. That is set separately,
+ with the `HOSTNAME` configuration setting (see the
+ `set-hostname.configure` extension).
+
+ * `TROVE_COMPANY` -- a description of the organisation who own the
+ Trove. This is shown in various parts of the web interface of the
+ Trove. It is for descriptive purposes only.
+
+ * `LORRY_SSH_KEY` -- ssh key pair that the Trove's Lorry will use to
+ access an upstream Trove, and to push updates to the Trove's git
+ server.
+
+ The value is a filename on the system doing the deployment (where
+ `morph deploy` is run). The file contains the _private_ key, and the
+ public key is in a file with the `.pub` suffix added to the name.
+
+ The upstream Trove needs to be configured to allow this key to
+ access it. This configuration does not do that automatically.
+
+ * `UPSTREAM_TROVE` -- public name of the upstream Trove (domain
+ name or IP address). This is an optional setting. If it's set,
+ the new Trove will be configured to mirror that Trove.
+
+ * `TROVE_ADMIN_USER`, `TROVE_ADMIN_EMAIL`, `TROVE_ADMIN_NAME`,
+ `TROVE_ADMIN_SSH_PUBKEY` -- details of the Trove's (initial)
+ administrator.
+
+ Each Trove needs at least one administrator user, and one is created
+ upon initial deployment. `TROVE_ADMIN_USER` is the username of the
+ account to be created, `TROVE_ADMIN_EMAIL` should be the e-mail of
+ the user, and `TROVE_ADMIN_NAME` is their name. If more
+ administrators are needed, the initial person should create them
+ using the usual Gitano commands.
+
+ * `LORRY_CONTROLLER_MINIONS` -- the number of Lorry Controller worker
+ processes to start. This is an optional setting and defaults to 4.
+ The more workers are running, the more Lorry jobs can run at the same
+ time, but the more resources they require.
+
+ * `TROVE_BACKUP_KEYS` -- a space-separated list of paths to SSH keys.
+ If this is set, the Trove will have a backup user that can be accessed
+ with rsync using the SSH keys provided.
+
+ Example
+ -------
+
+    The following set of variables could be used to deploy a Trove instance:
+
+ TROVE_ID: my-trove
+ TROVE_HOSTNAME: my-trove.example.com
+ TROVE_COMPANY: My Personal Trove for Me, Myself and I
+ LORRY_SSH_KEY: my-trove/lorry.key
+ UPSTREAM_TROVE: git.baserock.org
+ UPSTREAM_TROVE_USER: my-trove
+ UPSTREAM_TROVE_EMAIL: my-trove@example.com
+ TROVE_ADMIN_USER: tomjon
+ TROVE_ADMIN_EMAIL: tomjon@example.com
+ TROVE_ADMIN_NAME: Tomjon of Lancre
+ TROVE_ADMIN_SSH_PUBKEY: my-trove/tomjon.key.pub
+
+ These would be put into the cluster morphology used to do the
+ deployment.
diff --git a/extensions/vagrant.configure b/extensions/vagrant.configure
new file mode 100644
index 00000000..abc3ea0c
--- /dev/null
+++ b/extensions/vagrant.configure
@@ -0,0 +1,55 @@
+#!/bin/sh
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+if test "x$VAGRANT" = "x"; then
+ exit 0
+fi
+
+for needed in etc/ssh/sshd_config etc/sudoers; do
+ if ! test -e "$ROOT/$needed"; then
+ echo >&2 "Unable to find $needed"
+ echo >&2 "Cannot continue configuring as Vagrant basebox"
+ exit 1
+ fi
+done
+
+# SSH daemon needs to be configured to not use DNS...
+sed -i -e's/^\(.*[Uu][Ss][Ee][Dd][Nn][Ss].*\)$/#\1/' "$ROOT/etc/ssh/sshd_config"
+echo "UseDNS no" >> "$ROOT/etc/ssh/sshd_config"
+
+# We need to add a vagrant user with "vagrant" as the password. We do this
+# manually rather than chrooting in to run adduser, because at deployment
+# time we cannot necessarily run binaries from the target system. In
+# practice we could get away with it for now, since we cannot deploy raw
+# disks cross-platform and expect extlinux to install anyway, but we avoid
+# it for good practice and to highlight this deficiency.
+echo 'vagrant:x:1000:1000:Vagrant User:/home/vagrant:/bin/bash' >> "$ROOT/etc/passwd"
+echo 'vagrant:/6PTOoWylhw3w:16198:0:99999:7:::' >> "$ROOT/etc/shadow"
+echo 'vagrant:x:1000:' >> "$ROOT/etc/group"
+mkdir -p "$ROOT/home/vagrant"
+chown -R 1000:1000 "$ROOT/home/vagrant"
+
+# Next, the vagrant user is meant to have sudo access
+echo 'vagrant ALL=(ALL) NOPASSWD: ALL' >> "$ROOT/etc/sudoers"
+
+# And ensure that we get sbin in our path
+echo 'PATH="$PATH:/sbin:/usr/sbin"' >> "$ROOT/etc/profile"
+echo 'export PATH' >> "$ROOT/etc/profile"
+
diff --git a/extensions/vdaboot.configure b/extensions/vdaboot.configure
new file mode 100755
index 00000000..60de925b
--- /dev/null
+++ b/extensions/vdaboot.configure
@@ -0,0 +1,33 @@
+#!/bin/sh
+# Copyright (C) 2013,2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+# Change the "/" mount point to /dev/vda to use virtio disks.
+
+set -e
+
+if [ "$OPENSTACK_USER" ]
+then
+ # Modifying fstab
+ if [ -f "$1/etc/fstab" ]
+ then
+ mv "$1/etc/fstab" "$1/etc/fstab.old"
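+        # Write a new /dev/vda root entry first, then keep every line whose
+        # mount point is not "/".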
+ awk 'BEGIN {print "/dev/vda / btrfs defaults,rw,noatime 0 1"};
+ $2 != "/" {print $0 };' "$1/etc/fstab.old" > "$1/etc/fstab"
+ rm "$1/etc/fstab.old"
+ else
+        echo "/dev/vda / btrfs defaults,rw,noatime 0 1" > "$1/etc/fstab"
+ fi
+fi
diff --git a/extensions/virtualbox-ssh.check b/extensions/virtualbox-ssh.check
new file mode 100755
index 00000000..a97f3294
--- /dev/null
+++ b/extensions/virtualbox-ssh.check
@@ -0,0 +1,36 @@
+#!/usr/bin/python
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''Preparatory checks for Morph 'virtualbox-ssh' write extension'''
+
+import cliapp
+
+import morphlib.writeexts
+
+
+class VirtualBoxPlusSshCheckExtension(morphlib.writeexts.WriteExtension):
+ def process_args(self, args):
+ if len(args) != 1:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ self.require_btrfs_in_deployment_host_kernel()
+
+ upgrade = self.get_environment_boolean('UPGRADE')
+ if upgrade:
+ raise cliapp.AppException(
+ 'Use the `ssh-rsync` write extension to deploy upgrades to an '
+ 'existing remote system.')
+
+VirtualBoxPlusSshCheckExtension().run()
diff --git a/extensions/virtualbox-ssh.write b/extensions/virtualbox-ssh.write
new file mode 100755
index 00000000..774f2b4f
--- /dev/null
+++ b/extensions/virtualbox-ssh.write
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# Copyright (C) 2012-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''A Morph deployment write extension for deploying to VirtualBox via ssh.
+
+VirtualBox is assumed to be running on a remote machine, which is
+accessed over ssh. The machine gets created, but not started.
+
+See file virtualbox-ssh.write.help for documentation
+
+'''
+
+
+import cliapp
+import os
+import re
+import sys
+import time
+import tempfile
+import urlparse
+
+import morphlib.writeexts
+
+
+class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension):
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ temp_root, location = args
+ ssh_host, vm_name, vdi_path = self.parse_location(location)
+ autostart = self.get_environment_boolean('AUTOSTART')
+
+ vagrant = self.get_environment_boolean('VAGRANT')
+
+ fd, raw_disk = tempfile.mkstemp()
+ os.close(fd)
+ self.create_local_system(temp_root, raw_disk)
+
+ try:
+ self.transfer_and_convert_to_vdi(
+ raw_disk, ssh_host, vdi_path)
+ self.create_virtualbox_guest(ssh_host, vm_name, vdi_path,
+ autostart, vagrant)
+ except BaseException:
+            sys.stderr.write('Error deploying to VirtualBox\n')
+ os.remove(raw_disk)
+ cliapp.ssh_runcmd(ssh_host, ['rm', '-f', vdi_path])
+ raise
+ else:
+ os.remove(raw_disk)
+ self.status(
+ msg='Virtual machine %(vm_name)s has been created',
+ vm_name=vm_name)
+
+ def parse_location(self, location):
+ '''Parse the location argument to get relevant data.'''
+
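+        # e.g. vbox+ssh://alice@192.168.122.1/testsys/home/alice/testsys.img
+        # -> ('alice@192.168.122.1', 'testsys', '/home/alice/testsys.img')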
+ x = urlparse.urlparse(location)
+ if x.scheme != 'vbox+ssh':
+ raise cliapp.AppException(
+                'URL scheme must be vbox+ssh in %s' % location)
+ m = re.match('^/(?P<guest>[^/]+)(?P<path>/.+)$', x.path)
+ if not m:
+ raise cliapp.AppException('Cannot parse location %s' % location)
+ return x.netloc, m.group('guest'), m.group('path')
+
+ def transfer_and_convert_to_vdi(self, raw_disk, ssh_host, vdi_path):
+ '''Transfer raw disk image to VirtualBox host, and convert to VDI.'''
+
+ self.status(msg='Transfer disk and convert to VDI')
+
+ st = os.lstat(raw_disk)
+ xfer_hole_path = morphlib.util.get_data_path('xfer-hole')
+ recv_hole = morphlib.util.get_data('recv-hole')
+
+ ssh_remote_cmd = [
+ 'sh', '-c', recv_hole,
+ 'dummy-argv0', 'vbox', vdi_path, str(st.st_size),
+ ]
+
+ cliapp.runcmd(
+ ['python', xfer_hole_path, raw_disk],
+ ['ssh', ssh_host] + map(cliapp.shell_quote, ssh_remote_cmd),
+ stdout=None, stderr=None)
+
+ def virtualbox_version(self, ssh_host):
+ 'Get the version number of the VirtualBox running on the remote host.'
+
+ # --version gives a build id, which looks something like
+ # 1.2.3r456789, so we need to strip the suffix off and get a tuple
+ # of the (major, minor, patch) version, since comparing with a
+ # tuple is more reliable than a string and more convenient than
+ # comparing against the major, minor and patch numbers directly
+ self.status(msg='Checking version of remote VirtualBox')
+ build_id = cliapp.ssh_runcmd(ssh_host, ['VBoxManage', '--version'])
+ version_string = re.match(r"^([0-9\.]+).*$", build_id.strip()).group(1)
+ return tuple(int(s or '0') for s in version_string.split('.'))
+
+ def create_virtualbox_guest(self, ssh_host, vm_name, vdi_path, autostart,
+ vagrant):
+ '''Create the VirtualBox virtual machine.'''
+
+ self.status(msg='Create VirtualBox virtual machine')
+
+ ram_mebibytes = str(self.get_ram_size() / (1024**2))
+
+ vcpu_count = str(self.get_vcpu_count())
+
+ if not vagrant:
+ hostonly_iface = self.get_host_interface(ssh_host)
+
+ if self.virtualbox_version(ssh_host) < (4, 3, 0):
+ sataportcount_option = '--sataportcount'
+ else:
+ sataportcount_option = '--portcount'
+
+ commands = [
+ ['createvm', '--name', vm_name, '--ostype', 'Linux26_64',
+ '--register'],
+ ['modifyvm', vm_name, '--ioapic', 'on',
+ '--memory', ram_mebibytes, '--cpus', vcpu_count],
+ ['storagectl', vm_name, '--name', 'SATA Controller',
+ '--add', 'sata', '--bootable', 'on', sataportcount_option, '2'],
+ ['storageattach', vm_name, '--storagectl', 'SATA Controller',
+ '--port', '0', '--device', '0', '--type', 'hdd', '--medium',
+ vdi_path],
+ ]
+ if vagrant:
+ commands[1].extend(['--nic1', 'nat',
+ '--natnet1', 'default'])
+ else:
+ commands[1].extend(['--nic1', 'hostonly',
+ '--hostonlyadapter1', hostonly_iface,
+ '--nic2', 'nat', '--natnet2', 'default'])
+
+ attach_disks = self.parse_attach_disks()
+ for device_no, disk in enumerate(attach_disks, 1):
+ cmd = ['storageattach', vm_name,
+ '--storagectl', 'SATA Controller',
+ '--port', str(device_no),
+ '--device', '0',
+ '--type', 'hdd',
+ '--medium', disk]
+ commands.append(cmd)
+
+ if autostart:
+ commands.append(['startvm', vm_name])
+
+ for command in commands:
+ argv = ['VBoxManage'] + command
+ cliapp.ssh_runcmd(ssh_host, argv)
+
+ def get_host_interface(self, ssh_host):
+ host_ipaddr = os.environ.get('HOST_IPADDR')
+ netmask = os.environ.get('NETMASK')
+
+ if host_ipaddr is None:
+ raise cliapp.AppException('HOST_IPADDR was not given')
+
+ if netmask is None:
+ raise cliapp.AppException('NETMASK was not given')
+
+ # 'VBoxManage list hostonlyifs' retrieves a list with the hostonly
+ # interfaces on the host. For each interface, the following lines
+ # are shown on top:
+ #
+ # Name: vboxnet0
+ # GUID: 786f6276-656e-4074-8000-0a0027000000
+ # Dhcp: Disabled
+ # IPAddress: 192.168.100.1
+ #
+ # The following command tries to retrieve the hostonly interface
+ # name (e.g. vboxnet0) associated with the given ip address.
+ iface = None
+ lines = cliapp.ssh_runcmd(ssh_host,
+ ['VBoxManage', 'list', 'hostonlyifs']).splitlines()
+ for i, v in enumerate(lines):
+ if host_ipaddr in v:
+ iface = lines[i-3].split()[1]
+ break
+
+ if iface is None:
+ iface = cliapp.ssh_runcmd(ssh_host,
+ ['VBoxManage', 'hostonlyif', 'create'])
+ # 'VBoxManage hostonlyif create' shows the name of the
+ # created hostonly interface inside single quotes
+ iface = iface[iface.find("'") + 1 : iface.rfind("'")]
+ cliapp.ssh_runcmd(ssh_host,
+ ['VBoxManage', 'hostonlyif',
+ 'ipconfig', iface,
+ '--ip', host_ipaddr,
+ '--netmask', netmask])
+
+ return iface
+
+VirtualBoxPlusSshWriteExtension().run()
diff --git a/extensions/virtualbox-ssh.write.help b/extensions/virtualbox-ssh.write.help
new file mode 100644
index 00000000..2dbf988c
--- /dev/null
+++ b/extensions/virtualbox-ssh.write.help
@@ -0,0 +1,135 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+ Deploy a Baserock system as a *new* VirtualBox virtual machine.
+ (Use the `ssh-rsync` write extension to deploy upgrades to an *existing*
+ VM)
+
+ Connects to HOST via ssh to run VirtualBox's command line management tools.
+
+ Parameters:
+
+ * location: a custom URL scheme of the form `vbox+ssh://HOST/GUEST/PATH`,
+ where:
+ * HOST is the name of the host on which VirtualBox is running
+ * GUEST is the name of the guest VM on that host
+ * PATH is the path to the disk image that should be created,
+ on that host. For example,
+ `vbox+ssh://alice@192.168.122.1/testsys/home/alice/testys.img` where
+ * `alice@192.168.122.1` is the target host as given to ssh,
+ **from within the development host** (which may be
+ different from the target host's normal address);
+        * `testsys` is the name of the new guest VM;
+        * `/home/alice/testys.img` is the pathname of the disk image file
+ on the target host.
+
+ * HOSTNAME=name: the hostname of the **guest** VM within the network into
+ which it is being deployed.
+
+ * DISK_SIZE=X: **(MANDATORY)** the size of the VM's primary virtual hard
+ disk. `X` should use a suffix of `K`, `M`, or `G` (in upper or lower
+ case) to indicate kilo-, mega-, or gigabytes. For example,
+ `DISK_SIZE=100G` would create a 100 gigabyte virtual hard disk.
+
+ * RAM_SIZE=X: The amount of RAM that the virtual machine should allocate
+ for itself from the host. `X` is interpreted in the same as for
+ DISK_SIZE, and defaults to `1G`.
+
+ * VCPUS=n: the number of virtual CPUs for the VM. Allowed values 1-32. Do
+ not use more CPU cores than you have available physically (real cores,
+ no hyperthreads).
+
+ * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to
+ tell Linux to use, rather than booting the rootfs directly.
+
+    * DTB_PATH=path: **(MANDATORY)** for systems that require a device tree
+      binary - give the full path (without a leading /) to the location of the
+      DTB in the built system image. The deployment will fail if `path` does
+      not exist.
+
+ * BOOTLOADER_INSTALL=value: the bootloader to be installed
+ **(MANDATORY)** for non-x86 systems
+
+ allowed values =
+ - 'extlinux' (default) - the extlinux bootloader will
+ be installed
+ - 'none' - no bootloader will be installed by `morph deploy`. A
+ bootloader must be installed manually. This value must be used when
+ deploying non-x86 systems such as ARM.
+
+ * BOOTLOADER_CONFIG_FORMAT=value: the bootloader format to be used.
+ If not specified for x86-32 and x86-64 systems, 'extlinux' will be used
+
+ allowed values =
+ - 'extlinux'
+
+ * KERNEL_ARGS=args: optional additional kernel command-line parameters to
+ be appended to the default set. The default set is:
+
+ 'rw init=/sbin/init rootfstype=btrfs \
+ rootflags=subvol=systems/default/run \
+ root=[name or UUID of root filesystem]'
+
+ (See https://www.kernel.org/doc/Documentation/kernel-parameters.txt)
+
+ * AUTOSTART=<VALUE> - boolean. If it is set, the VM will be started when
+ it has been deployed.
+
+ * VAGRANT=<VALUE> - boolean. If it is set, then networking is configured
+ so that the VM will work with Vagrant. Otherwise networking is
+ configured to run directly in VirtualBox.
+
+ * HOST_IPADDR=<ip_address> - the IP address of the VM host.
+
+ * NETMASK=<netmask> - the netmask of the VM host.
+
+ * NETWORK_CONFIG=<net_config> - `net_config` is used to set up the VM's
+ network interfaces. It is a string containing semi-colon separated
+ 'stanzas' where each stanza provides information about a network
+ interface. Each stanza is of the form name:type[,arg=value] e.g.
+
+ lo:loopback
+ eth0:dhcp
+ eth1:static,address=10.0.0.1,netmask=255.255.0.0
+
+ An example of the NETWORK_CONFIG parameter (It should be in one line)
+
+ `"lo:loopback;eth0:static,address=192.168.100.2,netmask=255.255.255.0;
+ eth1:dhcp,hostname=$(hostname)"`
+
+      It is useful to configure one interface to use NAT, to give the VM
+      access to the outside world, and another interface to use the
+      VirtualBox host-only adapter, to allow you to access the Trove from
+      the host machine.
+
+      In the example above, the NAT interface eth1 is set up to use dhcp and
+      the host-only adapter interface is configured statically.
+
+ Note: you must give the host-only adapter interface an address that lies
+ **on the same network** as the host adapter. So if the host adapter has
+      an IP of 192.168.100.1, eth0 should have an address such as
+ 192.168.100.42.
+
+      The settings of the host adapter, including its IP, can be changed either
+ in the VirtualBox manager UI
+ (https://www.virtualbox.org/manual/ch03.html#settings-network)
+ or via the VBoxManage command line
+ (https://www.virtualbox.org/manual/ch08.html#idp57572192)
+
+ See Chapter 6 of the VirtualBox User Manual for more information about
+ virtual networking (https://www.virtualbox.org/manual/ch06.html)
+
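+    For example, a deployment might use values along these lines (all of
+    them illustrative only):
+
+        location: vbox+ssh://alice@192.168.122.1/testsys/home/alice/testsys.img
+        DISK_SIZE: 4G
+        RAM_SIZE: 1G
+        VCPUS: 2
+        HOSTNAME: testsys
+        HOST_IPADDR: 192.168.100.1
+        NETMASK: 255.255.255.0
+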
+ (See `morph help deploy` for details of how to pass parameters to write
+ extensions)