Diffstat (limited to 'old/extensions')
-rwxr-xr-xold/extensions/add-config-files.configure26
-rw-r--r--old/extensions/busybox-init.configure145
-rw-r--r--old/extensions/ceph.configure344
-rwxr-xr-xold/extensions/cloud-init.configure69
-rwxr-xr-xold/extensions/distbuild-trove-nfsboot.check153
-rwxr-xr-xold/extensions/distbuild-trove-nfsboot.write283
-rw-r--r--old/extensions/distbuild-trove-nfsboot.write.help49
-rw-r--r--old/extensions/distbuild.configure132
-rwxr-xr-xold/extensions/fstab.configure28
-rw-r--r--old/extensions/genivi.configure124
-rw-r--r--old/extensions/genivi.configure.help25
-rw-r--r--old/extensions/hosts1
-rwxr-xr-xold/extensions/hosts.configure50
-rw-r--r--old/extensions/image-package-example/README9
-rw-r--r--old/extensions/image-package-example/common.sh.in72
-rw-r--r--old/extensions/image-package-example/disk-install.sh.in51
-rw-r--r--old/extensions/image-package-example/make-disk-image.sh.in36
-rwxr-xr-xold/extensions/image-package.write168
-rwxr-xr-xold/extensions/initramfs.write26
-rw-r--r--old/extensions/initramfs.write.help55
-rwxr-xr-xold/extensions/install-essential-files.configure40
-rw-r--r--old/extensions/install-essential-files.configure.help20
-rwxr-xr-xold/extensions/install-files.configure138
-rw-r--r--old/extensions/install-files.configure.help86
-rwxr-xr-xold/extensions/installer.configure48
-rw-r--r--old/extensions/jffs2.write64
-rw-r--r--old/extensions/jffs2.write.help28
-rwxr-xr-xold/extensions/kvm.check171
-rwxr-xr-xold/extensions/kvm.write126
-rw-r--r--old/extensions/kvm.write.help90
-rw-r--r--old/extensions/mason.configure153
-rw-r--r--old/extensions/mason/ansible/hosts1
-rw-r--r--old/extensions/mason/ansible/mason-setup.yml83
-rw-r--r--old/extensions/mason/httpd.service10
-rwxr-xr-xold/extensions/mason/mason-generator.sh101
-rwxr-xr-xold/extensions/mason/mason-report.sh297
-rw-r--r--old/extensions/mason/mason-setup.service16
-rw-r--r--old/extensions/mason/mason.service12
-rwxr-xr-xold/extensions/mason/mason.sh90
-rw-r--r--old/extensions/mason/mason.timer10
-rw-r--r--old/extensions/mason/os-init-script6
-rw-r--r--old/extensions/mason/share/mason.conf14
-rw-r--r--old/extensions/mason/share/os.conf30
-rw-r--r--old/extensions/moonshot-kernel.configure33
-rwxr-xr-xold/extensions/nfsboot-server.configure58
-rwxr-xr-xold/extensions/nfsboot.check96
-rwxr-xr-xold/extensions/nfsboot.configure30
-rwxr-xr-xold/extensions/nfsboot.write206
-rw-r--r--old/extensions/nfsboot.write.help33
-rw-r--r--old/extensions/openstack-ceilometer.configure122
-rw-r--r--old/extensions/openstack-cinder.configure125
-rw-r--r--old/extensions/openstack-glance.configure101
-rw-r--r--old/extensions/openstack-ironic.configure157
-rw-r--r--old/extensions/openstack-keystone.configure123
-rw-r--r--old/extensions/openstack-network.configure80
-rw-r--r--old/extensions/openstack-neutron.configure138
-rw-r--r--old/extensions/openstack-nova.configure163
-rw-r--r--old/extensions/openstack-swift-controller.configure49
-rw-r--r--old/extensions/openstack-time.configure61
-rwxr-xr-xold/extensions/openstack.check92
-rwxr-xr-xold/extensions/openstack.write94
-rw-r--r--old/extensions/openstack.write.help51
-rw-r--r--old/extensions/partitioning.py163
-rwxr-xr-xold/extensions/pxeboot.check86
-rw-r--r--old/extensions/pxeboot.write756
-rw-r--r--old/extensions/pxeboot.write.help166
-rw-r--r--old/extensions/pyfdisk.README144
-rw-r--r--old/extensions/pyfdisk.py769
-rwxr-xr-xold/extensions/rawdisk.check52
-rwxr-xr-xold/extensions/rawdisk.write122
-rw-r--r--old/extensions/rawdisk.write.help127
-rwxr-xr-xold/extensions/recv-hole158
-rwxr-xr-xold/extensions/sdk.write284
-rwxr-xr-xold/extensions/set-hostname.configure27
-rwxr-xr-xold/extensions/simple-network.configure296
-rwxr-xr-xold/extensions/ssh-rsync.check66
-rwxr-xr-xold/extensions/ssh-rsync.write175
-rw-r--r--old/extensions/ssh-rsync.write.help50
-rwxr-xr-xold/extensions/sshkeys.configure25
-rwxr-xr-xold/extensions/strip-gplv3.configure97
-rw-r--r--old/extensions/swift-build-rings.yml34
-rwxr-xr-xold/extensions/swift-storage-devices-validate.py60
-rw-r--r--old/extensions/swift-storage.configure107
-rwxr-xr-xold/extensions/sysroot.check23
-rwxr-xr-xold/extensions/sysroot.write22
-rwxr-xr-xold/extensions/tar.check23
-rwxr-xr-xold/extensions/tar.write20
-rw-r--r--old/extensions/tar.write.help19
-rwxr-xr-xold/extensions/trove.configure172
-rw-r--r--old/extensions/trove.configure.help134
-rw-r--r--old/extensions/vagrant.configure55
-rwxr-xr-xold/extensions/vdaboot.configure33
-rwxr-xr-xold/extensions/virtualbox-ssh.check36
-rwxr-xr-xold/extensions/virtualbox-ssh.write219
-rw-r--r--old/extensions/virtualbox-ssh.write.help135
-rw-r--r--old/extensions/writeexts.py1072
-rwxr-xr-xold/extensions/xfer-hole137
97 files changed, 11156 insertions, 0 deletions
diff --git a/old/extensions/add-config-files.configure b/old/extensions/add-config-files.configure
new file mode 100755
index 00000000..2cf96fd1
--- /dev/null
+++ b/old/extensions/add-config-files.configure
@@ -0,0 +1,26 @@
+#!/bin/sh
+# Copyright (C) 2013,2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+# Copy all files located in $SRC_CONFIG_DIR to the image /etc.
+
+
+set -e
+
+if [ "x${SRC_CONFIG_DIR}" != x ]
+then
+ cp -r "$SRC_CONFIG_DIR"/* "$1/etc/"
+fi
+
diff --git a/old/extensions/busybox-init.configure b/old/extensions/busybox-init.configure
new file mode 100644
index 00000000..c7dba3b9
--- /dev/null
+++ b/old/extensions/busybox-init.configure
@@ -0,0 +1,145 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configuration extension to configure a system
+# to use busybox for its init, if INIT_SYSTEM=busybox is specified.
+#
+# As well as checking INIT_SYSTEM, the following variables are used.
+#
+# Getty configuration:
+# * CONSOLE_DEV: Which device to spawn a getty on (default: ttyS0)
+# * CONSOLE_BAUD: Baud rate of the console (default: 115200)
+# * CONSOLE_MODE: What kind of terminal this console emulates
+# (default: vt100)
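+#
+# A hypothetical direct invocation, with the rootfs mounted at /mnt/rootfs:
+#   INIT_SYSTEM=busybox CONSOLE_DEV=ttyS1 CONSOLE_BAUD=9600 \
+#       ./busybox-init.configure /mnt/rootfs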
+
+if [ "$INIT_SYSTEM" != busybox ]; then
+ echo Not configuring system to use busybox init.
+ exit 0
+fi
+
+set -e
+echo Configuring system to use busybox init
+
+RUN_SCRIPT=/etc/rcS
+INIT_SCRIPT=/sbin/init
+
+install_mdev_config(){
+ install -D -m644 /dev/stdin "$1" <<'EOF'
+# support module loading on hotplug
+$MODALIAS=.* root:root 660 @modprobe "$MODALIAS"
+
+# null may already exist; therefore ownership has to be changed with command
+null root:root 666 @chmod 666 $MDEV
+zero root:root 666
+full root:root 666
+random root:root 444
+urandom root:root 444
+hwrandom root:root 444
+grsec root:root 660
+
+kmem root:root 640
+mem root:root 640
+port root:root 640
+# console may already exist; therefore ownership has to be changed with command
+console root:root 600 @chmod 600 $MDEV
+ptmx root:root 666
+pty.* root:root 660
+
+# Typical devices
+
+tty root:root 666
+tty[0-9]* root:root 660
+vcsa*[0-9]* root:root 660
+ttyS[0-9]* root:root 660
+
+# block devices
+ram[0-9]* root:root 660
+loop[0-9]+ root:root 660
+sd[a-z].* root:root 660
+hd[a-z][0-9]* root:root 660
+md[0-9]* root:root 660
+sr[0-9]* root:root 660 @ln -sf $MDEV cdrom
+fd[0-9]* root:root 660
+
+# net devices
+SUBSYSTEM=net;.* root:root 600 @nameif
+tun[0-9]* root:root 600 =net/
+tap[0-9]* root:root 600 =net/
+EOF
+}
+
+install_start_script(){
+ install -D -m755 /dev/stdin "$1" <<'EOF'
+#!/bin/sh
+mount -t devtmpfs devtmpfs /dev
+mount -t proc proc /proc
+mount -t sysfs sysfs /sys
+mkdir -p /dev/pts
+mount -t devpts devpts /dev/pts
+
+echo /sbin/mdev >/proc/sys/kernel/hotplug
+mdev -s
+
+hostname -F /etc/hostname
+
+run-parts -a start /etc/init.d
+EOF
+}
+
+install_inittab(){
+ local inittab="$1"
+ local dev="$2"
+ local baud="$3"
+ local mode="$4"
+ install -D -m644 /dev/stdin "$1" <<EOF
+::sysinit:$RUN_SCRIPT
+
+::askfirst:-/bin/cttyhack /bin/sh
+::askfirst:/sbin/getty -L $dev $baud $mode
+
+::ctrlaltdel:/sbin/reboot
+::shutdown:/sbin/swapoff -a
+::shutdown:/bin/umount -a -r
+::restart:/sbin/init
+EOF
+}
+
+install_init_symlink(){
+ local initdir="$(dirname "$1")"
+ local initname="$(basename "$1")"
+ mkdir -p "$initdir"
+ cd "$initdir"
+ for busybox_dir in . ../bin ../sbin ../usr/bin ../usr/sbin; do
+ local busybox="$busybox_dir/busybox"
+ if [ ! -x "$busybox" ]; then
+ continue
+ fi
+ ln -sf "$busybox" "$initname"
+ return 0
+ done
+ echo Unable to find busybox >&2
+ exit 1
+}
+
+install_mdev_config "$1/etc/mdev.conf"
+
+install_start_script "$1$RUN_SCRIPT"
+
+install_inittab "$1/etc/inittab" "${CONSOLE_DEV-ttyS0}" \
+ "${CONSOLE_BAUD-115200}" "${CONSOLE_MODE-vt100}"
+
+install_init_symlink "$1$INIT_SCRIPT"
diff --git a/old/extensions/ceph.configure b/old/extensions/ceph.configure
new file mode 100644
index 00000000..32f512ef
--- /dev/null
+++ b/old/extensions/ceph.configure
@@ -0,0 +1,344 @@
+#!/usr/bin/env python
+# Copyright (C) 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import shutil
+import stat
+import sys
+import re
+
+import writeexts
+
+systemd_monitor_template = """
+[Unit]
+Description=Ceph Monitor firstboot setup
+After=network-online.target
+
+[Service]
+ExecStart=/bin/sh /root/setup-ceph-head
+ExecStartPost=/bin/systemctl disable ceph-monitor-fboot.service
+
+[Install]
+WantedBy=multi-user.target
+"""
+
+systemd_monitor_fname_template = "ceph-monitor-fboot.service"
+
+systemd_osd_template = """
+[Unit]
+Description=Ceph osd firstboot setup
+After=network-online.target
+
+[Service]
+ExecStart=/bin/sh /root/setup-ceph-node
+ExecStartPost=/bin/systemctl disable ceph-storage-fboot.service
+
+[Install]
+WantedBy=multi-user.target
+"""
+systemd_osd_fname_template = "ceph-storage-fboot.service"
+
+ceph_monitor_config_template = """#!/bin/sh
+hn={self.hostname}
+monIp={self.mon_ip}
+clustFsid={self.cluster_fsid}
+ceph-authtool --create-keyring /tmp/ceph.mon.keyring \
+ --gen-key -n mon. --cap mon 'allow *'
+ceph-authtool /tmp/ceph.mon.keyring \
+ --import-keyring /etc/ceph/ceph.client.admin.keyring
+monmaptool --create --add "$hn" "$monIp" --fsid "$clustFsid" /tmp/monmap
+mkdir -p /var/lib/ceph/mon/ceph-"$hn"
+ceph-mon --mkfs -i "$hn" --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
+systemctl enable ceph-mon@"$hn".service
+systemctl start ceph-mon@"$hn".service
+"""
+
+ceph_storage_config_template = """#!/bin/sh
+storageDisk={self.osd_storage_dev}
+if file -sL "$storageDisk" | grep -q ext4; then
+    echo "ext4 disk detected. Proceeding..."
+else
+    echo "ERROR: ext4 disk required." \
+        "Ensure $storageDisk is formatted as ext4." >&2
+ exit 1
+fi
+hn={self.hostname}
+uuid="`uuidgen`"
+osdnum="`ceph osd create $uuid`"
+mkdir /var/lib/ceph/osd/ceph-"$osdnum"
+mount -o user_xattr "$storageDisk" /var/lib/ceph/osd/ceph-"$osdnum"
+ceph-osd -i "$osdnum" --mkfs --mkkey --osd-uuid "$uuid"
+ceph auth add osd."$osdnum" osd 'allow *' mon 'allow profile osd' \
+ -i /var/lib/ceph/osd/ceph-"$osdnum"/keyring
+ceph osd crush add-bucket "$hn" host
+ceph osd crush move "$hn" root=default
+ceph osd crush add osd."$osdnum" 1.0 host="$hn"
+systemctl enable ceph-osd@"$osdnum".service
+systemctl start ceph-osd@"$osdnum".service
+echo "$storageDisk /var/lib/ceph/osd/ceph-$osdnum/ ext4 defaults 0 2" \
+ >> /etc/fstab
+"""
+
+executable_file_permissions = (
+ stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR |
+ stat.S_IXGRP | stat.S_IRGRP | stat.S_IXOTH |
+ stat.S_IROTH )
+
+class CephConfigurationExtension(writeexts.Extension):
+ """
+ Set up ceph server daemons.
+
+ Support for metadata server has not been tested.
+
+ Must include the following environment variables:
+
+    HOSTNAME - Must be defined; it is used as the ID for
+ the monitor and metadata daemons.
+
+ CEPH_CONF - Provide a ceph configuration file.
+
+ Optional environment variables:
+
+    CEPH_CLUSTER - Cluster name; if not provided, defaults to 'ceph'.
+
+ CEPH_BOOTSTRAP_OSD - Registered key capable of generating OSD
+ keys.
+
+ CEPH_BOOTSTRAP_MDS - Registered key capable of generating MDS
+ keys.
+
+ Bootstrap keys are required for creating OSD daemons on servers
+ that do not have a running monitor daemon. They are gathered
+ by 'ceph-deploy gatherkeys' but can be generated and registered
+ separately.
+
+ CEPH_CLIENT_ADMIN - Key required by any ceph action that requires
+ client admin authentication to run
+
+ CEPH_MON - (Blank) Create a ceph monitor daemon on the image.
+ CEPH_MON_KEYRING - Location of monitor keyring. Required by the
+ monitor if using cephx authentication.
+    CEPH_MON_IP - IP address that the monitor node will have. This is
+                  required if CEPH_MON is set. It should also be set in
+                  the CEPH_CONF file.
+    CEPH_CLUSTER_FSID - A UUID for the ceph cluster. This is required if
+                        CEPH_MON is set. It should also be set in the
+                        CEPH_CONF file.
+
+ CEPH_OSD - (Blank) Create a ceph object storage daemon on the image.
+    CEPH_OSD_X_DATA_DIR - Location of the data directory for an OSD.
+                          Creates an OSD daemon on the image. 'X' is an
+                          integer ID; many OSD daemons may run on the same
+                          server.
+ CEPH_OSD_STORAGE_DEV - Location of the storage device to be used to host
+ the osd file system. This is a required field.
+
+ CEPH_MDS - (Blank) Create a metadata server daemon on server.
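+
+    Example monitor-node environment (hypothetical values):
+
+        HOSTNAME=mon0
+        CEPH_CONF=./ceph.conf
+        CEPH_MON=
+        CEPH_MON_IP=10.0.0.5
+        CEPH_CLUSTER_FSID=5a8f2f3e-8c3a-4c9e-9d2b-2f6e1a0c4d7e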
+ """
+
+ def process_args(self, args):
+
+ if "HOSTNAME" not in os.environ:
+ sys.exit( "ERROR: Need a hostname defined by 'HOSTNAME'" )
+ if "CEPH_CONF" not in os.environ:
+ sys.exit( "ERROR: Need a ceph conf file defined by 'CEPH_CONF'" )
+
+ self.dest_dir = args[0]
+
+ self.cluster_name = "ceph"
+ self.hostname = os.environ["HOSTNAME"]
+
+ self.conf_file = "/etc/ceph/{}.conf".format(self.cluster_name)
+ self.admin_file = os.path.join( "/etc/ceph/",
+ "{}.client.admin.keyring".format(self.cluster_name) )
+ self.mon_dir = "/var/lib/ceph/mon/"
+ self.osd_dir = "/var/lib/ceph/osd/"
+ self.mds_dir = "/var/lib/ceph/mds/"
+ self.tmp_dir = "/var/lib/ceph/tmp/"
+ self.bootstrap_mds_dir = "/var/lib/ceph/bootstrap-mds/"
+ self.bootstrap_osd_dir = "/var/lib/ceph/bootstrap-osd/"
+ self.systemd_dir = "/etc/systemd/system/"
+ self.systemd_multiuser_dir = \
+ "/etc/systemd/system/multi-user.target.wants/"
+
+
+ print "Copying from " + os.getcwd()
+ self.copy_to_img(os.environ["CEPH_CONF"], self.conf_file)
+
+
+ # Copy over bootstrap keyrings
+ if "CEPH_BOOTSTRAP_OSD" in os.environ:
+ self.copy_bootstrap_osd(os.environ["CEPH_BOOTSTRAP_OSD"]);
+ if "CEPH_BOOTSTRAP_MDS" in os.environ:
+ self.copy_bootstrap_mds(os.environ["CEPH_BOOTSTRAP_MDS"]);
+
+ # Copy over admin keyring
+ if "CEPH_CLIENT_ADMIN" in os.environ:
+ self.copy_to_img(os.environ["CEPH_CLIENT_ADMIN"], self.admin_file);
+
+ # Configure any monitor daemons
+ if "CEPH_MON" in os.environ:
+
+ # check for and set self.mon_ip : needs static value.
+ if "CEPH_MON_IP" not in os.environ:
+ sys.exit("ERROR: Static ip required for the monitor node")
+ else:
+ self.mon_ip = os.environ["CEPH_MON_IP"]
+
+ # Check and set for cluster fsid : can have default
+ if "CEPH_CLUSTER_FSID" not in os.environ:
+ sys.exit("ERROR: UUID fsid value required for cluster.")
+ else:
+ self.cluster_fsid = os.environ["CEPH_CLUSTER_FSID"]
+
+ self.create_mon_data_dir(os.environ.get("CEPH_MON_KEYRING"))
+
+ # Configure any object storage daemons
+ if "CEPH_OSD" in os.environ:
+
+ # Check a osd storage device has been provided
+ if "CEPH_OSD_STORAGE_DEV" not in os.environ:
+ sys.exit("ERROR: Storage device required. \
+ Set 'CEPH_OSD_STORAGE_DEV'.")
+ else:
+ self.osd_storage_dev = os.environ["CEPH_OSD_STORAGE_DEV"]
+
+ self.create_osd_startup_script()
+
+ osd_re = r"CEPH_OSD_(\d+)_DATA_DIR$"
+
+ for env in os.environ.keys():
+ match = re.match(osd_re, env)
+ if match:
+ osd_data_dir_env = match.group(0)
+ osd_id = match.group(1)
+
+ self.create_osd_data_dir(osd_id,
+ os.environ.get(osd_data_dir_env))
+
+
+ # Configure any mds daemons
+ if "CEPH_MDS" in os.environ:
+ self.create_mds_data_dir()
+
+ # Create a fake 'partprobe'
+ fake_partprobe_filename = self.dest_dir + "/sbin/partprobe"
+ fake_partprobe = open(fake_partprobe_filename, 'w')
+ fake_partprobe.write("#!/bin/bash\nexit 0;\n")
+ fake_partprobe.close()
+ os.chmod(fake_partprobe_filename, executable_file_permissions)
+ self.create_startup_scripts()
+
+ def copy_to_img(self, src_file, dest_file):
+ shutil.copy(src_file, self.dest_dir + dest_file)
+
+ def copy_bootstrap_osd(self, src_file):
+ self.copy_to_img(src_file,
+ os.path.join(self.bootstrap_osd_dir,
+ "{}.keyring".format(self.cluster_name)))
+
+ def copy_bootstrap_mds(self, src_file):
+ self.copy_to_img(src_file,
+ os.path.join(self.bootstrap_mds_dir,
+ "{}.keyring".format(self.cluster_name)))
+
+ def symlink_to_multiuser(self, fname):
+        print "Linking: %s into %s" % (fname, self.systemd_multiuser_dir)
+ os.symlink(os.path.join("../", fname),
+ self.dest_dir +
+ os.path.join(self.systemd_multiuser_dir, fname))
+
+ def create_mon_data_dir(self, src_keyring):
+
+ # Create systemd file to initialize the monitor data directory
+ keyring = ""
+ mon_systemd_fname = systemd_monitor_fname_template
+
+ systemd_script_name = self.dest_dir \
+ + os.path.join(self.systemd_dir, mon_systemd_fname)
+ print "Write monitor systemd script to " + systemd_script_name
+ mon_systemd = open(systemd_script_name, 'w')
+ mon_systemd.write(systemd_monitor_template)
+ mon_systemd.close()
+ # Create a symlink to the multi user target
+ self.symlink_to_multiuser(mon_systemd_fname)
+
+ def create_osd_data_dir(self, osd_id, data_dir):
+ if not data_dir:
+ data_dir = '/srv/osd' + osd_id
+
+ # Create the osd data dir
+ os.makedirs(self.dest_dir + data_dir)
+
+ def create_osd_startup_script(self):
+ osd_systemd_fname = systemd_osd_fname_template
+
+ osd_full_name = self.dest_dir + \
+ os.path.join(self.systemd_dir, osd_systemd_fname)
+ print "Write Storage systemd script to " + osd_full_name
+
+ osd_systemd = open(osd_full_name, 'w')
+
+ osd_systemd.write(systemd_osd_template)
+ osd_systemd.close()
+
+ # Create a symlink to the multi user target
+ self.symlink_to_multiuser(osd_systemd_fname)
+
+ def create_mds_data_dir(self):
+
+ # Create the monitor data directory
+ mds_data_dir = os.path.join(self.mds_dir,
+ "{}-{}".format(self.cluster_name, self.hostname))
+ os.makedirs(self.dest_dir + mds_data_dir)
+
+ # Create sysvinit file to start via sysvinit
+ sysvinit_file = os.path.join(mds_data_dir, "sysvinit")
+ open(self.dest_dir + sysvinit_file, 'a').close()
+
+
+ def create_startup_scripts(self):
+ print "Copying startup scripts to node:"
+
+ # Write monitor script if monitor requested
+ if "CEPH_MON" in os.environ:
+ head_setup_file = \
+ os.path.join(self.dest_dir,"root","setup-ceph-head")
+ with open(head_setup_file, "w") as hs_file:
+ hs_file.write( ceph_monitor_config_template.format(self=self) )
+
+ os.chmod(head_setup_file, executable_file_permissions)
+
+ # Write osd script if osd is requested
+ elif "CEPH_OSD" in os.environ:
+ osd_setup_file = \
+ os.path.join(self.dest_dir, "root", "setup-ceph-node")
+ with open(osd_setup_file, "w") as os_file:
+ os_file.write( ceph_storage_config_template.format(self=self) )
+
+ os.chmod(osd_setup_file, executable_file_permissions)
+
+ else:
+ print ("No valid node type defined. "
+ "A generic ceph node will be created.")
+
+CephConfigurationExtension().run()
diff --git a/old/extensions/cloud-init.configure b/old/extensions/cloud-init.configure
new file mode 100755
index 00000000..3bcc0909
--- /dev/null
+++ b/old/extensions/cloud-init.configure
@@ -0,0 +1,69 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+#
+# This is a "morph deploy" configuration extension to enable the
+# cloud-init services.
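+# Set CLOUD_INIT=True (or 'yes') to enable them; ''/False/no skip
+# configuration, and any other value is an error.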
+set -e
+
+ROOT="$1"
+
+# Write detailed logs to a special log file if set, otherwise everything
+# goes to stdout.
+if [ -z "$MORPH_LOG_FD" ]; then
+ MORPH_LOG_FD=1
+fi
+
+##########################################################################
+
+set -e
+
+case "$CLOUD_INIT" in
+''|False|no)
+ exit 0
+ ;;
+True|yes)
+ echo "Configuring cloud-init"
+ ;;
+*)
+    echo "Unrecognised value '$CLOUD_INIT' for CLOUD_INIT" >&2
+ exit 1
+ ;;
+esac
+
+
+cloud_init_services="cloud-config.service
+ cloud-init-local.service
+ cloud-init.service
+ cloud-final.service"
+
+# Iterate over the cloud-init services and enable them creating a link
+# into /etc/systemd/system/multi-user.target.wants.
+# If the services to link are not present, fail.
+
+services_folder="lib/systemd/system"
+for service_name in $cloud_init_services; do
+ if [ ! -f "$ROOT/$services_folder/$service_name" ]; then
+ echo "ERROR: Service $service_name is missing." >&2
+ echo "Failed to configure cloud-init."
+ exit 1
+ else
+ echo Enabling systemd service "$service_name" >&"$MORPH_LOG_FD"
+ ln -sf "/$services_folder/$service_name" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$service_name"
+ fi
+done
diff --git a/old/extensions/distbuild-trove-nfsboot.check b/old/extensions/distbuild-trove-nfsboot.check
new file mode 100755
index 00000000..e825ac66
--- /dev/null
+++ b/old/extensions/distbuild-trove-nfsboot.check
@@ -0,0 +1,153 @@
+#!/usr/bin/python2
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''Preparatory checks for Morph 'distbuild-trove-nfsboot' write extension'''
+
+import logging
+import os
+import sys
+
+import writeexts
+
+
+class DistbuildTroveNFSBootCheckExtension(writeexts.WriteExtension):
+
+ nfsboot_root = '/srv/nfsboot'
+ remote_user = 'root'
+
+ required_vars = [
+ 'DISTBUILD_CONTROLLER',
+ 'DISTBUILD_GIT_SERVER',
+ 'DISTBUILD_SHARED_ARTIFACT_CACHE',
+ 'DISTBUILD_TROVE_ID',
+ 'DISTBUILD_WORKERS',
+ 'DISTBUILD_WORKER_SSH_KEY',
+ ]
+
+ def system_path(self, system_name, version_label=None):
+ if version_label:
+ return os.path.join(self.nfsboot_root, system_name, 'systems',
+ version_label, 'run')
+ else:
+ return os.path.join(self.nfsboot_root, system_name)
+
+ def process_args(self, args):
+ if len(args) != 1:
+ raise writeexts.ExtensionError(
+ 'Wrong number of command line args')
+
+ nfs_host = args[0]
+ nfs_netloc = '%s@%s' % (self.remote_user, nfs_host)
+
+ version_label = os.getenv('VERSION_LABEL', 'factory')
+
+ missing_vars = [var for var in self.required_vars
+                        if var not in os.environ]
+ if missing_vars:
+ raise writeexts.ExtensionError(
+ 'Please set: %s' % ', '.join(missing_vars))
+
+ controllers = os.getenv('DISTBUILD_CONTROLLER').split()
+ workers = os.getenv('DISTBUILD_WORKERS').split()
+
+ if len(controllers) != 1:
+ raise writeexts.ExtensionError(
+ 'Please specify exactly one controller.')
+
+ if len(workers) == 0:
+ raise writeexts.ExtensionError(
+ 'Please specify at least one worker.')
+
+ upgrade = self.get_environment_boolean('UPGRADE')
+
+ self.check_good_server(nfs_netloc)
+
+ system_names = set(controllers + workers)
+ for system_name in system_names:
+ if upgrade:
+ self.check_upgradeable(nfs_netloc, system_name, version_label)
+ else:
+ system_path = self.system_path(system_name)
+
+ if self.remote_directory_exists(nfs_netloc, system_path):
+                if not self.get_environment_boolean('OVERWRITE'):
+ raise writeexts.ExtensionError(
+ 'System %s already exists at %s:%s. Try `morph '
+ 'upgrade` instead of `morph deploy`.' % (
+ system_name, nfs_netloc, system_path))
+
+ def check_good_server(self, netloc):
+ # FIXME: assumes root
+ self.check_ssh_connectivity(netloc.split('@')[-1])
+
+ # Is an NFS server
+ try:
+ writeexts.ssh_runcmd(
+ netloc, ['test', '-e', '/etc/exports'])
+ except writeexts.ExtensionError:
+ raise writeexts.ExtensionError('server %s is not an nfs server'
+ % netloc)
+ try:
+ writeexts.ssh_runcmd(
+ netloc, ['systemctl', 'is-enabled', 'nfs-server.service'])
+
+ except writeexts.ExtensionError:
+            raise writeexts.ExtensionError('server %s does not manage its '
+                                           'nfs server with systemd' % netloc)
+
+ # TFTP server exports /srv/nfsboot/tftp
+ tftp_root = os.path.join(self.nfsboot_root, 'tftp')
+ try:
+ writeexts.ssh_runcmd(
+                netloc, ['test', '-d', tftp_root])
+ except writeexts.ExtensionError:
+ raise writeexts.ExtensionError('server %s does not export %s' %
+ (netloc, tftp_root))
+
+ def check_upgradeable(self, nfs_netloc, system_name, version_label):
+ '''Check that there is already a version of the system present.
+
+ Distbuild nodes are stateless, so an upgrade is actually pretty much
+ the same as an initial deployment. This test is just a sanity check.
+
+ '''
+ system_path = self.system_path(system_name)
+ system_version_path = self.system_path(system_name, version_label)
+
+ if not self.remote_directory_exists(nfs_netloc, system_path):
+ raise writeexts.ExtensionError(
+ 'System %s not found at %s:%s, cannot deploy an upgrade.' % (
+ system_name, nfs_netloc, system_path))
+
+        if self.remote_directory_exists(nfs_netloc, system_version_path):
+            if not self.get_environment_boolean('OVERWRITE'):
+                raise writeexts.ExtensionError(
+                    'System %s version %s already exists at %s:%s.' % (
+                        system_name, version_label, nfs_netloc,
+                        system_version_path))
+
+ def remote_directory_exists(self, nfs_netloc, path):
+ try:
+ writeexts.ssh_runcmd(nfs_netloc, ['test', '-d', path])
+ except writeexts.ExtensionError as e:
+ logging.debug('SSH exception: %s', e)
+ return False
+
+ return True
+
+
+DistbuildTroveNFSBootCheckExtension().run()
diff --git a/old/extensions/distbuild-trove-nfsboot.write b/old/extensions/distbuild-trove-nfsboot.write
new file mode 100755
index 00000000..171a84a8
--- /dev/null
+++ b/old/extensions/distbuild-trove-nfsboot.write
@@ -0,0 +1,283 @@
+#!/usr/bin/python2
+# Copyright (C) 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''Morph .write extension for a distbuild network booting off a Trove with NFS.
+
+'''
+
+
+import os
+import subprocess
+import sys
+import tempfile
+
+import writeexts
+
+
+class DistbuildTroveNFSBootWriteExtension(writeexts.WriteExtension):
+
+ '''Create an NFS root and kernel on TFTP during Morph's deployment.
+
+ See distbuild-trove-nfsboot.help for documentation.
+
+ '''
+
+ nfsboot_root = '/srv/nfsboot'
+ remote_user = 'root'
+
+ def system_path(self, system_name, version_label=None):
+ if version_label:
+ # The 'run' directory is kind of a historical artifact. Baserock
+ # systems that have Btrfs root disks maintain an orig/ and a run/
+ # subvolume, so that one can find changes that have been made at
+ # runtime. For distbuild systems, this isn't necessary because the
+ # root filesystems of the nodes are effectively stateless. However,
+ # existing systems have bootloaders configured to look for the
+ # 'run' directory, so we need to keep creating it.
+ return os.path.join(self.nfsboot_root, system_name, 'systems',
+ version_label, 'run')
+ else:
+ return os.path.join(self.nfsboot_root, system_name)
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise writeexts.ExtensionError('Wrong number of command line args')
+
+ local_system_path, nfs_host = args
+
+ nfs_netloc = '%s@%s' % (self.remote_user, nfs_host)
+
+ version_label = os.getenv('VERSION_LABEL', 'factory')
+
+ controller_name = os.getenv('DISTBUILD_CONTROLLER')
+ worker_names = os.getenv('DISTBUILD_WORKERS').split()
+ system_names = set([controller_name] + worker_names)
+
+ git_server = os.getenv('DISTBUILD_GIT_SERVER')
+ shared_artifact_cache = os.getenv('DISTBUILD_SHARED_ARTIFACT_CACHE')
+ trove_id = os.getenv('DISTBUILD_TROVE_ID')
+ worker_ssh_key_path = os.getenv('DISTBUILD_WORKER_SSH_KEY')
+
+ host_map = self.parse_host_map_string(os.getenv('HOST_MAP', ''))
+
+ kernel_relpath = self.find_kernel(local_system_path)
+
+ copied_rootfs = None
+ for system_name in system_names:
+ remote_system_path = self.system_path(system_name, version_label)
+ if copied_rootfs is None:
+ self.transfer_system(
+ nfs_netloc, local_system_path, remote_system_path)
+ copied_rootfs = remote_system_path
+ else:
+ self.duplicate_remote_system(
+ nfs_netloc, copied_rootfs, remote_system_path)
+
+ for system_name in system_names:
+ remote_system_path = self.system_path(system_name, version_label)
+ self.link_kernel_to_tftpboot_path(
+ nfs_netloc, system_name, version_label, kernel_relpath)
+ self.set_hostname(
+ nfs_netloc, system_name, remote_system_path)
+ self.write_distbuild_config(
+ nfs_netloc, system_name, remote_system_path, git_server,
+ shared_artifact_cache, trove_id, worker_ssh_key_path,
+ controller_name, worker_names, host_map=host_map)
+
+ self.configure_nfs_exports(nfs_netloc, system_names)
+
+ for system_name in system_names:
+ self.update_default_version(nfs_netloc, system_name, version_label)
+
+ def parse_host_map_string(self, host_map_string):
+ '''Parse the HOST_MAP variable
+
+ Returns a dict mapping hostname to value (where value is an IP
+ address, a fully-qualified domain name, an alternate hostname, or
+ whatever).
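+
+        For example, HOST_MAP='controller=10.0.0.1 worker1=10.0.0.2'
+        yields {'controller': '10.0.0.1', 'worker1': '10.0.0.2'}
+        (assuming writeexts.parse_environment_pairs splits each pair on
+        the first '=').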
+
+ '''
+ pairs = host_map_string.split(' ')
+ return writeexts.parse_environment_pairs({}, pairs)
+
+ def transfer_system(self, nfs_netloc, local_system_path,
+ remote_system_path):
+ self.status(msg='Copying rootfs to %(nfs_netloc)s',
+ nfs_netloc=nfs_netloc)
+ writeexts.ssh_runcmd(
+ nfs_netloc, ['mkdir', '-p', remote_system_path])
+ # The deployed rootfs may have been created by OSTree, so definitely
+ # don't pass --hard-links to `rsync`.
+ subprocess.check_call(
+ ['rsync', '--archive', '--delete', '--info=progress2',
+ '--protect-args', '--partial', '--sparse', '--xattrs',
+ local_system_path + '/',
+ '%s:%s' % (nfs_netloc, remote_system_path)], stdout=sys.stdout)
+
+ def duplicate_remote_system(self, nfs_netloc, source_system_path,
+ target_system_path):
+ self.status(msg='Duplicating rootfs to %(target_system_path)s',
+ target_system_path=target_system_path)
+ writeexts.ssh_runcmd(nfs_netloc,
+ ['mkdir', '-p', target_system_path])
+        # We can't pass --info=progress2 here, because it may not be available
+        # in the remote 'rsync': the --info setting was added in rsync 3.1.0,
+        # and old versions of Baserock ship rsync 3.0.9. So the user doesn't
+        # get any progress info on stdout for the 'duplicate' stage.
+ writeexts.ssh_runcmd(nfs_netloc,
+ ['rsync', '--archive', '--delete', '--protect-args', '--partial',
+ '--sparse', '--xattrs', source_system_path + '/',
+ target_system_path], stdout=sys.stdout)
+
+ def find_kernel(self, local_system_path):
+ bootdir = os.path.join(local_system_path, 'boot')
+ image_names = ['vmlinuz', 'zImage', 'uImage']
+
+ for name in image_names:
+ try_path = os.path.join(bootdir, name)
+ if os.path.exists(try_path):
+ kernel_path = os.path.relpath(try_path, local_system_path)
+ break
+ else:
+ raise writeexts.ExtensionError(
+ 'Could not find a kernel in the system: none of '
+ '%s found' % ', '.join(image_names))
+ return kernel_path
+
+ def link_kernel_to_tftpboot_path(self, nfs_netloc, system_name,
+ version_label, kernel_relpath):
+ '''Create links for TFTP server for a system's kernel.'''
+
+ remote_system_path = self.system_path(system_name, version_label)
+ kernel_dest = os.path.join(remote_system_path, kernel_relpath)
+
+ self.status(msg='Creating links to %(name)s kernel in tftp directory',
+ name=system_name)
+        tftp_dir = os.path.join(self.nfsboot_root, 'tftp')
+
+ versioned_kernel_name = "%s-%s" % (system_name, version_label)
+ kernel_name = system_name
+
+ writeexts.ssh_runcmd(nfs_netloc,
+ ['ln', '-f', kernel_dest,
+ os.path.join(tftp_dir, versioned_kernel_name)])
+
+ writeexts.ssh_runcmd(nfs_netloc,
+ ['ln', '-sf', versioned_kernel_name,
+ os.path.join(tftp_dir, kernel_name)])
+
+ def set_remote_file_contents(self, nfs_netloc, path, text):
+ with tempfile.NamedTemporaryFile() as f:
+ f.write(text)
+ f.flush()
+ subprocess.check_call(
+ ['scp', f.name, '%s:%s' % (nfs_netloc, path)])
+
+ def set_hostname(self, nfs_netloc, system_name, system_path):
+ hostname_path = os.path.join(system_path, 'etc', 'hostname')
+ self.set_remote_file_contents(
+ nfs_netloc, hostname_path, system_name + '\n')
+
+ def write_distbuild_config(self, nfs_netloc, system_name, system_path,
+ git_server, shared_artifact_cache, trove_id,
+ worker_ssh_key_path, controller_name,
+ worker_names, host_map = {}):
+ '''Write /etc/distbuild/distbuild.conf on the node.
+
+ This .write extension takes advantage of the 'generic' mode of
+ distbuild.configure. Each node is not configured until first-boot,
+ when distbuild-setup.service runs and configures the node based on the
+ contents of /etc/distbuild/distbuild.conf.
+
+ '''
+ def host(hostname):
+ return host_map.get(hostname, hostname)
+
+ config = {
+ 'ARTIFACT_CACHE_SERVER': host(shared_artifact_cache),
+ 'CONTROLLERHOST': host(controller_name),
+ 'TROVE_HOST': host(git_server),
+ 'TROVE_ID': trove_id,
+ 'DISTBUILD_CONTROLLER': system_name == controller_name,
+ 'DISTBUILD_WORKER': system_name in worker_names,
+ 'WORKERS': ', '.join(map(host, worker_names)),
+ 'WORKER_SSH_KEY': '/etc/distbuild/worker.key',
+ }
+
+ config_text = '\n'.join(
+ '%s: %s' % (key, value) for key, value in config.iteritems())
+ config_text = \
+ '# Generated by distbuild-trove-nfsboot.write\n' + \
+ config_text + '\n'
+ path = os.path.join(system_path, 'etc', 'distbuild')
+ writeexts.ssh_runcmd(
+ nfs_netloc, ['mkdir', '-p', path])
+ subprocess.check_call(
+ ['scp', worker_ssh_key_path, '%s:%s' % (nfs_netloc, path)])
+ self.set_remote_file_contents(
+ nfs_netloc, os.path.join(path, 'distbuild.conf'), config_text)
+
+ def configure_nfs_exports(self, nfs_netloc, system_names):
+ '''Ensure the Trove is set up to export the NFS roots we need.
+
+ This doesn't handle setting up the TFTP daemon. We assume that is
+ already running.
+
+ '''
+ for system_name in system_names:
+ exported_path = self.system_path(system_name)
+ exports_path = '/etc/exports'
+
+ # Rather ugly SSH hackery follows to ensure each system path is
+ # listed in /etc/exports.
+ try:
+ writeexts.ssh_runcmd(
+ nfs_netloc, ['grep', '-q', exported_path, exports_path])
+ except writeexts.ExtensionError:
+ ip_mask = '*'
+ options = 'rw,no_subtree_check,no_root_squash,async'
+ exports_string = '%s %s(%s)\n' % (exported_path, ip_mask,
+ options)
+ exports_append_sh = '''\
+ set -eu
+ target="$1"
+ temp=$(mktemp)
+ cat "$target" > "$temp"
+ cat >> "$temp"
+ mv "$temp" "$target"
+ '''
+ writeexts.ssh_runcmd(
+ nfs_netloc,
+ ['sh', '-c', exports_append_sh, '--', exports_path],
+ feed_stdin=exports_string)
+
+ writeexts.ssh_runcmd(nfs_netloc,
+ ['systemctl', 'restart', 'nfs-server.service'])
+
+ def update_default_version(self, remote_netloc, system_name,
+ version_label):
+ self.status(msg='Linking \'default\' to %(version)s for %(system)s',
+ version=version_label, system=system_name)
+ system_path = self.system_path(system_name)
+ system_version_path = os.path.join(system_path, 'systems',
+ version_label)
+ default_path = os.path.join(system_path, 'systems', 'default')
+
+ writeexts.ssh_runcmd(remote_netloc,
+ ['ln', '-sfn', system_version_path, default_path])
+
+
+DistbuildTroveNFSBootWriteExtension().run()
diff --git a/old/extensions/distbuild-trove-nfsboot.write.help b/old/extensions/distbuild-trove-nfsboot.write.help
new file mode 100644
index 00000000..62f1455c
--- /dev/null
+++ b/old/extensions/distbuild-trove-nfsboot.write.help
@@ -0,0 +1,49 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+ Deploy a distbuild network, using a Trove to serve the kernel and rootfs.
+
+ The `location` argument is the hostname of the Trove system.
+
+ The following configuration values must be specified:
+
+ - DISTBUILD_CONTROLLER: hostname of controller system
+ - DISTBUILD_WORKERS: hostnames of each worker system
+ - DISTBUILD_GIT_SERVER: Trove hostname
+ - DISTBUILD_SHARED_ARTIFACT_CACHE: Trove hostname
+ - DISTBUILD_TROVE_ID: Trove ID
+ - DISTBUILD_WORKER_SSH_KEY: SSH key to be used for ssh:// repos
+
+ A note on TROVE_ID: the current distbuild-setup service requires that
+ a single 'Trove ID' is specified. This is used in Morph for expanding
+  keyed URLs. If you set TROVE_ID=foo for example, foo:bar will be expanded
+  to git://$GIT_SERVER/foo/bar, in addition to the standard baserock: and
+ upstream: prefixes that you can use.
+
+ The WORKER_SSH_KEY must be provided, even if you don't need it. The
+ distbuild-setup service could be changed to make it optional.
+
+ The following configuration values are optional:
+
+ - HOST_MAP: a list of key=value pairs mapping hostnames to IP addresses,
+    or fully-qualified domain names. Useful if you cannot rely on hostname
+    resolution working for your deployment (see the example below).
+
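+  For example (hypothetical addresses):
+
+      HOST_MAP: controller=192.168.122.10 worker1=192.168.122.11
+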
+ The extension will connect to root@location via ssh to copy the kernel and
+ rootfs, and configure the nfs server. It will duplicate the kernel and
+ rootfs once for each node in the distbuild network.
+
+ The deployment mechanism makes assumptions about the bootloader
+ configuration of the target machines.
diff --git a/old/extensions/distbuild.configure b/old/extensions/distbuild.configure
new file mode 100644
index 00000000..062aaecc
--- /dev/null
+++ b/old/extensions/distbuild.configure
@@ -0,0 +1,132 @@
+#!/bin/sh
+# Copyright (C) 2013-2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configure extension to configure a Baserock
+# build node, as part of a distributed building cluster. It uses the
+# following variables from the environment:
+#
+# * DISTBUILD_CONTROLLER: if 'True', machine is set up as the controller.
+# * DISTBUILD_WORKER: if 'True', machine is set up as a worker.
+# * TROVE_ID: hostname and Trove prefix of the server to pull source
+# from and push built artifacts to.
+# * TROVE_HOST: FQDN of the same server as in TROVE_ID
+#
+# The following variable is optional:
+#
+# * ARTIFACT_CACHE_SERVER: by default artifacts are pushed to the same
+# Trove that served the source, but you can use a different one.
+#
+# The following variable is required for worker nodes only:
+#
+# * CONTROLLERHOST: hostname or IP address of distbuild controller machine.
+# * WORKER_SSH_KEY: identity used to authenticate with Trove
+#
+# The following variable is required for the controller node only:
+#
+# * WORKERS: hostnames or IP addresses of worker nodes, comma-separated.
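+#
+# A hypothetical worker-node invocation, with the rootfs mounted at
+# /mnt/rootfs:
+#   DISTBUILD_WORKER=True TROVE_ID=my-trove TROVE_HOST=trove.example.com \
+#   CONTROLLERHOST=controller.example.com WORKER_SSH_KEY=./worker.key \
+#   ./distbuild.configure /mnt/rootfs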
+
+set -e
+
+if [ -n "$DISTBUILD_GENERIC" ]; then
+ echo "Not configuring the distbuild node, it will be generic"
+ exit 0
+fi
+
+# Set default values for these two options if they are unset, so that if the
+# user specifies no distbuild config at all the configure extension exits
+# without doing anything but does not raise an error.
+DISTBUILD_CONTROLLER=${DISTBUILD_CONTROLLER-False}
+DISTBUILD_WORKER=${DISTBUILD_WORKER-False}
+
+if [ "$DISTBUILD_CONTROLLER" = False -a "$DISTBUILD_WORKER" = False ]; then
+ exit 0
+fi
+
+set -u
+
+# Check that all the variables needed are present:
+
+error_vars=false
+
+if [ "x$TROVE_HOST" = "x" ]; then
+ echo "ERROR: TROVE_HOST needs to be defined."
+ error_vars=true
+fi
+
+if [ "x$TROVE_ID" = "x" ]; then
+ echo "ERROR: TROVE_ID needs to be defined."
+ error_vars=true
+fi
+
+if [ "$DISTBUILD_WORKER" = True ]; then
+ if ! ssh-keygen -lf "$WORKER_SSH_KEY" > /dev/null 2>&1; then
+ echo "ERROR: WORKER_SSH_KEY is not a vaild ssh key."
+ error_vars=true
+ fi
+
+ if [ "x$CONTROLLERHOST" = "x" ]; then
+ echo "ERROR: CONTROLLERHOST needs to be defined."
+ error_vars=true
+ fi
+fi
+
+if [ "$DISTBUILD_CONTROLLER" = True ]; then
+ if [ "x$WORKERS" = "x" ]; then
+ echo "ERROR: WORKERS needs to be defined."
+ error_vars=true
+ fi
+fi
+
+if "$error_vars"; then
+ exit 1
+fi
+
+
+ROOT="$1"
+
+DISTBUILD_DATA="$ROOT/etc/distbuild"
+mkdir -p "$DISTBUILD_DATA"
+
+# If it's a worker, install the worker ssh key.
+if [ "$DISTBUILD_WORKER" = True ]
+then
+ install -m 0644 "$WORKER_SSH_KEY" "$DISTBUILD_DATA/worker.key"
+fi
+
+
+
+# Create the configuration file
+python <<'EOF' >"$DISTBUILD_DATA/distbuild.conf"
+import os, sys, yaml
+
+distbuild_configuration={
+ 'TROVE_ID': os.environ['TROVE_ID'],
+ 'TROVE_HOST': os.environ['TROVE_HOST'],
+ 'DISTBUILD_WORKER': os.environ['DISTBUILD_WORKER'],
+ 'DISTBUILD_CONTROLLER': os.environ['DISTBUILD_CONTROLLER'],
+ 'WORKER_SSH_KEY': '/etc/distbuild/worker.key',
+}
+
+
+optional_keys = ('ARTIFACT_CACHE_SERVER', 'CONTROLLERHOST', 'WORKERS',
+ 'TROVE_BACKUP_KEYS')
+
+for key in optional_keys:
+ if key in os.environ:
+ distbuild_configuration[key] = os.environ[key]
+
+yaml.dump(distbuild_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/old/extensions/fstab.configure b/old/extensions/fstab.configure
new file mode 100755
index 00000000..3e67b585
--- /dev/null
+++ b/old/extensions/fstab.configure
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright © 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# =*= License: GPL-2 =*=
+
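+# Each environment variable named FSTAB_<NAME> becomes an entry in the
+# image's /etc/fstab, written via writeexts.write_from_dict. A
+# hypothetical example:
+#   FSTAB_SRV='LABEL=srv /srv ext4 defaults 0 2'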
+
+import os
+import sys
+
+import writeexts
+
+envvars = {k: v for (k, v) in os.environ.iteritems() if k.startswith('FSTAB_')}
+
+conf_file = os.path.join(sys.argv[1], 'etc/fstab')
+writeexts.write_from_dict(conf_file, envvars)
diff --git a/old/extensions/genivi.configure b/old/extensions/genivi.configure
new file mode 100644
index 00000000..c5f6dc4f
--- /dev/null
+++ b/old/extensions/genivi.configure
@@ -0,0 +1,124 @@
+#!/bin/sh
+
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+unacceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True)
+ eval "$1=true"
+ ;;
+ False|'')
+ eval "$1=false"
+ ;;
+ *)
+ unnaceptable "$1"
+ ;;
+ esac
+}
+
+check_weston_config(){
+ weston_ini_folder="$ROOT/usr/share/doc/weston"
+ case "$GENIVI_WESTON_CONFIG" in
+ 'baseline'|'')
+ weston_ini_file=ivi-shell-weston.ini
+ ;;
+ 'gdp')
+ weston_ini_file=gdp-weston.ini
+ ;;
+ *)
+ unnaceptable "GENIVI_WESTON_CONFIG"
+ ;;
+ esac
+ weston_ini_file="$weston_ini_folder/$weston_ini_file"
+ if [ ! -f "$weston_ini_file" ]; then
+ echo ERROR: Failed to locate weston config file: $weston_ini_file
+ exit 1
+ fi
+}
+
+check_weston_backend(){
+    # If nothing defined, use drm-backend.so
+    if [ "x$GENIVI_WESTON_BACKEND" = "x" ]; then
+        echo GENIVI_WESTON_BACKEND not set, defaulting to drm-backend.so
+ GENIVI_WESTON_BACKEND=drm-backend.so
+ fi
+
+ backends_folder="$ROOT/usr/lib/weston"
+ backend_file="$backends_folder/$GENIVI_WESTON_BACKEND"
+ # Check that the backend exists
+ echo Checking for "$backend_file" ...
+ if [ ! -f "$backend_file" ]; then
+ echo "File $backend_file doesn't exist"
+ GENIVI_WESTON_BACKEND="$GENIVI_WESTON_BACKEND-backend.so"
+ backend_file="$backends_folder/$GENIVI_WESTON_BACKEND"
+ echo Checking for "$backend_file" ...
+ if [ ! -f "$backend_file" ]; then
+ echo "File $backend_file doesn't exist"
+ echo ERROR: Failed to find Weston backend in the system
+ exit 1
+ fi
+ fi
+ echo Backend $backend_file found
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool GENIVI_WESTON_AUTOSTART
+check_weston_config
+check_weston_backend
+
+######################################
+# Create and enable weston.service #
+######################################
+
+cat > "$ROOT/usr/lib/systemd/system/weston.service" <<EOF
+[Unit]
+Description=Weston reference Wayland compositor
+After=dbus.service
+
+[Service]
+ExecStart=/usr/bin/weston-launch -u root -- --log=/tmp/weston.log --backend="$GENIVI_WESTON_BACKEND"
+ExecStop=/usr/bin/killall -s KILL weston
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+if "$GENIVI_WESTON_AUTOSTART"; then
+ enable weston
+fi
+
+######################################
+# Set weston.ini file #
+######################################
+
+install -d "$ROOT/etc/xdg/weston"
+install -m 0644 "$weston_ini_file" "$ROOT/etc/xdg/weston/weston.ini"
diff --git a/old/extensions/genivi.configure.help b/old/extensions/genivi.configure.help
new file mode 100644
index 00000000..6616f871
--- /dev/null
+++ b/old/extensions/genivi.configure.help
@@ -0,0 +1,25 @@
+help: |
+ This extension configures GENIVI systems. It uses the following
+ configuration variables:
+
+ * `GENIVI_WESTON_CONFIG` (optional, defaults to `baseline`)
+ the weston configuration file to use. The GENIVI baseline system
+ uses a different one than the GENIVI Demo Platform.
+
+    Possible values here are `baseline` and `gdp`. Other values will
+ fail. The extension will copy the relevant configuration file
+ from `/usr/share/doc/weston/` to `/etc/xdg/weston/weston.ini`
+ to make it the default configuration for Weston.
+
+ * `GENIVI_WESTON_BACKEND` (optional, defaults to 'drm-backend.so')
+ the backend to use with Weston. This backend will be used in
+ the `weston.service` systemd unit overriding the default backend
+ specified when building Weston.
+
+ The extension looks for the backend in the system, failing if
+    it's not present. It will also try appending `-backend.so` to
+    the variable, so that you can set this variable to either
+    `fbdev` or `fbdev-backend.so`.
+
+  * `GENIVI_WESTON_AUTOSTART` (optional, defaults to 'False')
+    boolean. If `True`, it will enable `weston.service`.
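+
+  For example, a deployment might set (hypothetical values):
+
+      GENIVI_WESTON_CONFIG: gdp
+      GENIVI_WESTON_BACKEND: fbdev
+      GENIVI_WESTON_AUTOSTART: True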
diff --git a/old/extensions/hosts b/old/extensions/hosts
new file mode 100644
index 00000000..5b97818d
--- /dev/null
+++ b/old/extensions/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local
diff --git a/old/extensions/hosts.configure b/old/extensions/hosts.configure
new file mode 100755
index 00000000..11fcf573
--- /dev/null
+++ b/old/extensions/hosts.configure
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright © 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# =*= License: GPL-2 =*=
+
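+# Each environment variable named HOSTS_<NAME> must hold an IP address
+# followed by one or more hostnames, and is written into the image's
+# /etc/hosts. A hypothetical example:
+#   HOSTS_GATEWAY='192.168.0.1 gateway gw'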
+
+import os
+import sys
+import socket
+
+import writeexts
+
+def validate(var, line):
+ xs = line.split()
+ if len(xs) == 0:
+ raise writeexts.ExtensionError(
+ "`%s: %s': line is empty" % (var, line))
+
+ ip = xs[0]
+ hostnames = xs[1:]
+
+ if len(hostnames) == 0:
+ raise writeexts.ExtensionError(
+ "`%s: %s': missing hostname" % (var, line))
+
+ family = socket.AF_INET6 if ':' in ip else socket.AF_INET
+
+ try:
+ socket.inet_pton(family, ip)
+ except socket.error:
+ raise writeexts.ExtensionError("`%s: %s' invalid ip" % (var, ip))
+
+envvars = {k: v for (k, v) in os.environ.iteritems() if k.startswith('HOSTS_')}
+
+conf_file = os.path.join(sys.argv[1], 'etc/hosts')
+writeexts.write_from_dict(conf_file, envvars, validate)
diff --git a/old/extensions/image-package-example/README b/old/extensions/image-package-example/README
new file mode 100644
index 00000000..f6b66cd9
--- /dev/null
+++ b/old/extensions/image-package-example/README
@@ -0,0 +1,9 @@
+Image package example scripts
+=============================
+
+These are scripts used to create disk images or install the system onto
+an existing disk.
+
+This is also implemented independently for the rawdisk.write write
+extension; see writeexts.WriteExtension.create_local_system() for
+a similar, python implementation.
diff --git a/old/extensions/image-package-example/common.sh.in b/old/extensions/image-package-example/common.sh.in
new file mode 100644
index 00000000..9a7389a7
--- /dev/null
+++ b/old/extensions/image-package-example/common.sh.in
@@ -0,0 +1,72 @@
+#!/bin/false
+# Script library to be used by disk-install.sh and make-disk-image.sh
+
+status(){
+ echo "$@"
+}
+
+info(){
+ echo "$@" >&2
+}
+
+warn(){
+ echo "$@" >&2
+}
+
+extract_rootfs(){
+ tar -C "$1" -xf @@ROOTFS_TAR_PATH@@ .
+}
+
+make_disk_image(){
+ truncate --size "$1" "$2"
+}
+
+format_disk(){
+ local disk="$1"
+ mkfs.ext4 -F -L rootfs "$disk"
+}
+
+install_fs_config(){
+ local mountpoint="$1"
+ local rootdisk="${2-/dev/vda}"
+ cat >>"$mountpoint/etc/fstab" <<EOF
+$rootdisk / ext4 rw,errors=remount-ro 0 0
+EOF
+ install -D -m 644 /proc/self/fd/0 "$mountpoint/boot/extlinux.conf" <<EOF
+DEFAULT baserock
+LABEL baserock
+SAY Booting Baserock
+LINUX /boot/vmlinuz
+APPEND root=$rootdisk
+EOF
+}
+
+install_bootloader(){
+ local disk="$1"
+ local mountpoint="$2"
+ dd if=@@IMAGE_DIR@@/mbr.bin conv=notrunc bs=440 count=1 of="$disk"
+ extlinux --install "$mountpoint/boot"
+}
+
+loop_file(){
+ losetup --show --find "$1"
+}
+unloop_file(){
+ #losetup --detach "$1"
+ # unlooping handled by umount -d, for busybox compatibility
+ true
+}
+
+temp_mount(){
+ local mp="$(mktemp -d)"
+ if ! mount "$@" "$mp"; then
+ rmdir "$mp"
+ return 1
+ fi
+ echo "$mp"
+}
+untemp_mount(){
+ # Unmount and detach in one step for busybox compatibility
+ umount -d "$1"
+ rmdir "$1"
+}
diff --git a/old/extensions/image-package-example/disk-install.sh.in b/old/extensions/image-package-example/disk-install.sh.in
new file mode 100644
index 00000000..bc8e0e67
--- /dev/null
+++ b/old/extensions/image-package-example/disk-install.sh.in
@@ -0,0 +1,51 @@
+#!/bin/sh
+# Script for writing the system to an existing disk.
+# This formats the disk, extracts the rootfs to it, installs the
+# bootloader, and ensures there's appropriate configuration for the
+# bootloader, kernel and userland to agree what the rootfs is.
+
+set -eu
+
+usage(){
+ cat <<EOF
+usage: $0 DISK [TARGET_DISK]
+
+DISK: Where the disk appears on your development machine
+TARGET_DISK: What the disk will appear as on the target machine
+EOF
+}
+
+. @@SCRIPT_DIR@@/common.sh
+
+if [ "$#" -lt 1 -o "$#" -gt 2 ]; then
+ usage
+ exit 1
+fi
+
+DISK="$1"
+TARGET_DISK="${1-/dev/sda}"
+
+status Formatting "$DISK" as ext4
+format_disk "$DISK"
+(
+ info Mounting "$DISK"
+ MP="$(temp_mount -t ext4 "$DISK")"
+ info Mounted "$DISK" to "$MP"
+ set +e
+ (
+ set -e
+ info Copying rootfs onto disk
+ extract_rootfs "$MP"
+ info Configuring disk paths
+ install_fs_config "$MP" "$TARGET_DISK"
+ info Installing bootloader
+ install_bootloader "$DISK" "$MP"
+ )
+ ret="$?"
+ if [ "$ret" != 0 ]; then
+ warn Filling rootfs failed with "$ret"
+ fi
+ info Unmounting "$DISK" from "$MP" and removing "$MP"
+ untemp_mount "$MP"
+ exit "$ret"
+)
diff --git a/old/extensions/image-package-example/make-disk-image.sh.in b/old/extensions/image-package-example/make-disk-image.sh.in
new file mode 100644
index 00000000..61264fa0
--- /dev/null
+++ b/old/extensions/image-package-example/make-disk-image.sh.in
@@ -0,0 +1,36 @@
+#!/bin/sh
+# Script for writing the system to a disk image file.
+# This creates a file of the right size, attaches it to a loop device,
+# then hands the rest of the work off to disk-install.sh
+
+set -eu
+
+usage(){
+ cat <<EOF
+usage: $0 FILENAME SIZE [TARGET_DISK]
+
+FILENAME: Location to write the disk image to
+SIZE: Size to create the disk image with
+TARGET_DISK: What the disk will appear as on the target machine
+EOF
+}
+
+. @@SCRIPT_DIR@@/common.sh
+
+if [ "$#" -lt 2 -o "$#" -gt 3 ]; then
+ usage
+ exit 1
+fi
+
+DISK_IMAGE="$1"
+DISK_SIZE="$2"
+TARGET_DISK="${3-/dev/vda}"
+
+make_disk_image "$DISK_SIZE" "$DISK_IMAGE"
+
+(
+ LOOP="$(loop_file "$DISK_IMAGE")"
+ set +e
+    # Hand the attached loop device to disk-install.sh, so that its
+    # mount and `umount -d` operate on the device set up above.
+    @@SCRIPT_DIR@@/disk-install.sh "$LOOP" "$TARGET_DISK"
+ ret="$?"
+ unloop_file "$LOOP"
+ exit "$ret"
+)
diff --git a/old/extensions/image-package.write b/old/extensions/image-package.write
new file mode 100755
index 00000000..15ceadcf
--- /dev/null
+++ b/old/extensions/image-package.write
@@ -0,0 +1,168 @@
+#!/bin/sh
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# =*= License: GPL-2 =*=
+#
+#
+# This is a write extension for making a package that can be used to
+# install the produced system. Ideally we'd instead have Baserock
+# everywhere to do the deployment, but we need to support this workflow
+# until that is possible.
+#
+# This write extension produces a tarball, which contains:
+# - a tarball of the configured system root file system
+# - any supporting files listed in BOOTLOADER_BLOBS
+# - any supporting scripts, generated from templates listed in
+# INCLUDE_SCRIPTS
+#
+# The extension requires the following environment variables to be set:
+#
+# * BOOTLOADER_BLOBS: files to include besides rootfs tarball,
+# paths are relative to the root of the built rootfs
+# works on any kind of file in the rootfs, named
+# BOOTLOADER_BLOBS since that's the common use-case
+# :-separated by default
+# * INCLUDE_SCRIPTS: script templates that are included in the package
+# after being filled out
+# file paths are relative to the definitions repository
+# :-separated by default
+#
+# The script templates may contain any of the following strings, which
+# will be replaced with a shell expression that expands to the
+# appropriate value as a single shell word:
+# - @@SCRIPT_DIR@@: the path the script files are installed to
+# - @@IMAGE_DIR@@: the path BOOTLOADER_BLOBS are installed to
+# - @@ROOTFS_TAR_PATH@@: path to the rootfs tarball
+#
+# The interpolated strings may run commands dependent on the current
+# working directory, so if `cd` is required, bind these values to a
+# variable beforehand.
+#
+# The following optional variables can be set as well:
+#
+# * INCLUDE_SCRIPTS_SEPARATOR: character to separate INCLUDE_SCRIPTS with (default: :)
+# * BOOTLOADER_BLOBS_SEPARATOR: character to separate BOOTLOADER_BLOBS with (default: :)
+# * SCRIPT_SUBDIR: where in the package processed scripts are installed to (default: tools)
+# * IMAGE_SUBDIR: where in the package BOOTLOADER_BLOBS are copied to (default: image_files)
+# * ROOTFS_TAR: name to call the rootfs tarball inside IMAGE_SUBDIR (default: rootfs.tar)
+# * OUTPUT_COMPRESS: compression used for output tarball (default: none)
+# * ROOTFS_COMPRESS: compression used for rootfs (default: none)
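+#
+# For example, with the defaults the produced tarball unpacks to a single
+# base directory (named after the output tarball, extensions stripped)
+# containing:
+#
+#   tools/        processed INCLUDE_SCRIPTS
+#   image_files/  BOOTLOADER_BLOBS plus rootfs.tar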
+
+set -eu
+
+die(){
+ echo "$@" >&2
+ exit 1
+}
+
+warn(){
+ echo "$@" >&2
+}
+
+info(){
+ echo "$@" >&2
+}
+
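+# Quote a string for safe interpolation into shell text;
+# e.g. shellescape "it's" yields 'it'\''s'.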
+shellescape(){
+    echo "'$(echo "$1" | sed -e "s/'/'\\\\''/g")'"
+}
+
+sedescape(){
+ # Escape the passed in string so it can be safely interpolated into
+ # a sed expression as a literal value.
+ echo "$1" | sed -e 's/[\/&]/\\&/g'
+}
+
+ROOTDIR="$1"
+OUTPUT_TAR="$2"
+td="$(mktemp -d)"
+IMAGE_SUBDIR="${IMAGE_SUBDIR-image_files}"
+SCRIPT_SUBDIR="${SCRIPT_SUBDIR-tools}"
+ROOTFS_TAR="${ROOTFS_TAR-rootfs.tar}"
+
+# Generate shell snippets that will expand to paths to various resources
+# needed by the scripts.
+# They expand to a single shell word, so constructs like the following work
+# SCRIPT_DIR=@@SCRIPT_DIR@@
+# dd if="$SCRIPT_DIR/mbr" of="$disk" count=1
+# tar -C "$mountpoint" -xf @@ROOTFS_TAR_PATH@@ .
+find_script_dir='"$(readlink -f "$(dirname "$0")")"'
+image_dir="$find_script_dir/../$(shellescape "$IMAGE_SUBDIR")"
+rootfs_tar_path="$image_dir/$(shellescape "$ROOTFS_TAR")"
+
+install_script(){
+ local source_file="$1"
+ local output_dir="$2"
+ local target_file="$output_dir/$SCRIPT_SUBDIR/$(basename "$source_file" .in)"
+ sed -e "s/@@SCRIPT_DIR@@/$(sedescape "$find_script_dir")/g" \
+ -e "s/@@IMAGE_DIR@@/$(sedescape "$image_dir")/g" \
+ -e "s/@@ROOTFS_TAR_PATH@@/$(sedescape "$rootfs_tar_path")/g" \
+ "$source_file" \
+ | install -D -m 755 /proc/self/fd/0 "$target_file"
+}
+
+install_scripts(){
+ local output_dir="$1"
+ (
+ IFS="${INCLUDE_SCRIPTS_SEPARATOR-:}"
+ for script in $INCLUDE_SCRIPTS; do
+ local script_path="$(pwd)/$script"
+ if [ ! -e "$script_path" ]; then
+ warn Script "$script" not found, ignoring
+ continue
+ fi
+ install_script "$script" "$output_dir"
+ done
+ )
+}
+
+install_bootloader_blobs(){
+ local output_dir="$1"
+ local image_dir="$output_dir/$IMAGE_SUBDIR"
+ (
+ IFS="${BOOTLOADER_BLOBS_SEPARATOR-:}"
+ for blob in $BOOTLOADER_BLOBS; do
+ local blob_path="$ROOTDIR/$blob"
+ if [ ! -e "$blob_path" ]; then
+ warn Bootloader blob "$blob" not found, ignoring
+ continue
+ fi
+ install -D -m644 "$blob_path" "$image_dir/$(basename "$blob_path")"
+ done
+ )
+}
+
+# Determine a basename for our directory: the name of our tarball with
+# its extensions removed. This is needed since tarball packages usually
+# have a base directory containing their contents, rather than
+# extracting into the current directory.
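+# E.g. OUTPUT_TAR=image-package.tar.gz yields output_dir=image-package.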
+output_dir="$(basename "$OUTPUT_TAR")"
+for ext in .xz .bz2 .gzip .gz .tgz .tar; do
+ output_dir="${output_dir%$ext}"
+done
+
+info Installing scripts
+install_scripts "$td/$output_dir"
+
+info Installing bootloader blobs
+install_bootloader_blobs "$td/$output_dir"
+
+info Writing rootfs tar to "$IMAGE_SUBDIR/$ROOTFS_TAR"
+tar -C "$ROOTDIR" -c . \
+| sh -c "${ROOTFS_COMPRESS-cat}" >"$td/$output_dir/$IMAGE_SUBDIR/$ROOTFS_TAR"
+
+info Writing image package tar to "$OUTPUT_TAR"
+tar -C "$td" -c "$output_dir" | sh -c "${OUTPUT_COMPRESS-cat}" >"$OUTPUT_TAR"
diff --git a/old/extensions/initramfs.write b/old/extensions/initramfs.write
new file mode 100755
index 00000000..1059defa
--- /dev/null
+++ b/old/extensions/initramfs.write
@@ -0,0 +1,26 @@
+#!/bin/sh
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# =*= License: GPL-2 =*=
+
+set -e
+
+ROOTDIR="$1"
+INITRAMFS_PATH="$2"
+
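+# Pack the rootfs as a gzip-compressed newc-format cpio archive, the
+# layout the kernel expects for an initramfs.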
+(cd "$ROOTDIR" &&
+ find . -print0 |
+ cpio -0 -H newc -o) |
+ gzip -c | install -D -m644 /dev/stdin "$INITRAMFS_PATH"
diff --git a/old/extensions/initramfs.write.help b/old/extensions/initramfs.write.help
new file mode 100644
index 00000000..54d3ae8c
--- /dev/null
+++ b/old/extensions/initramfs.write.help
@@ -0,0 +1,55 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+ Create an initramfs for a system by taking an existing system and
+ converting it to the appropriate format.
+
+ The system must have a `/init` executable as the userland entry-point.
+  This can have a different path if `rdinit=$path` is added to
+ the kernel command line. This can be added to the `rawdisk`,
+ `virtualbox-ssh` and `kvm` write extensions with the `KERNEL_CMDLINE`
+ option.
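+  For example, if the entry-point lives at `/sbin/init` instead, the
+  parent system's deployment could set (illustrative value):
+
+      KERNEL_CMDLINE: rdinit=/sbin/init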
+
+  It is possible to use a ramfs as the final rootfs without a `/init`
+  executable, by setting `root=/dev/mem` or `rdinit=/sbin/init`,
+  but this is beyond the scope of the `initramfs.write` extension.
+
+ The intended use of initramfs.write is to be part of a nested
+ deployment, so the parent system has an initramfs stored as
+ `/boot/initramfs.gz`. See the following example:
+
+ name: initramfs-test
+ kind: cluster
+ systems:
+ - morph: minimal-system-x86_64-generic
+ deploy:
+ system:
+ type: rawdisk
+ location: initramfs-system-x86_64.img
+ DISK_SIZE: 1G
+ HOSTNAME: initramfs-system
+ INITRAMFS_PATH: boot/initramfs.gz
+ subsystems:
+ - morph: initramfs-x86_64
+ deploy:
+ initramfs:
+ type: initramfs
+ location: boot/initramfs.gz
+
+ Parameters:
+
+  * location: the path where the initramfs will be installed
+    (e.g. `boot/initramfs.gz` in the above example)
diff --git a/old/extensions/install-essential-files.configure b/old/extensions/install-essential-files.configure
new file mode 100755
index 00000000..8314b56d
--- /dev/null
+++ b/old/extensions/install-essential-files.configure
@@ -0,0 +1,40 @@
+#!/usr/bin/env python2
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+''' A Morph configuration extension for adding essential files to a system
+
+It will read the manifest file located at essential-files/manifest,
+then use the contents of that file to determine which files
+to install into the target system.
+
+'''
+
+import os
+import subprocess
+import sys
+
+target_root = sys.argv[1]
+
+# Clear all INSTALL_FILES environment variable options,
+# so we don't end up installing INSTALL_FILES_foo multiple times.
+for var in list(os.environ):
+ if var.startswith("INSTALL_FILES"):
+ del os.environ[var]
+
+# Force installation of the essential-files manifest
+os.environ["INSTALL_FILES"] = "install-files/essential-files/manifest"
+command = "extensions/install-files.configure"
+subprocess.check_call([command, target_root])
diff --git a/old/extensions/install-essential-files.configure.help b/old/extensions/install-essential-files.configure.help
new file mode 100644
index 00000000..9148aeff
--- /dev/null
+++ b/old/extensions/install-essential-files.configure.help
@@ -0,0 +1,20 @@
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+ This installs files from the essential-files/ folder in your
+ definitions.git repo, according to essential-files/manifest.
+
+  It wraps the install-files.configure extension. See that extension's
+  help for more about the format of the manifest file.
diff --git a/old/extensions/install-files.configure b/old/extensions/install-files.configure
new file mode 100755
index 00000000..54481b97
--- /dev/null
+++ b/old/extensions/install-files.configure
@@ -0,0 +1,138 @@
+#!/usr/bin/python2
+# Copyright (C) 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+''' A Morph configuration extension for adding arbitrary files to a system
+
+It will read the manifest files specified in environment variables starting
+with INSTALL_FILES, then use the contents of those files to determine which
+files to install into the target system.
+
+'''
+
+import errno
+import os
+import re
+import sys
+import shlex
+import shutil
+import stat
+
+try:
+ import jinja2
+ jinja_available = True
+except ImportError:
+ jinja_available = False
+
+import writeexts
+
+class InstallFilesConfigureExtension(writeexts.Extension):
+
+ def process_args(self, args):
+ if not any(var.startswith('INSTALL_FILES') for var in os.environ):
+ return
+ target_root = args[0]
+ for manifest_var in sorted((var for var in os.environ
+ if var.startswith('INSTALL_FILES'))):
+ manifests = shlex.split(os.environ[manifest_var])
+ for manifest in manifests:
+ self.install_manifest(manifest, target_root)
+
+ def install_manifest(self, manifest, target_root):
+ manifest_dir = os.path.dirname(manifest)
+ with open(manifest) as f:
+ entries = f.readlines()
+ for entry in entries:
+ self.install_entry(entry, manifest_dir, target_root)
+
+ def force_symlink(self, source, link_name):
+ try:
+ os.symlink(source, link_name)
+ except OSError as e:
+ if e.errno == errno.EEXIST:
+ os.remove(link_name)
+ os.symlink(source, link_name)
+
+ def install_entry(self, entry, manifest_root, target_root):
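+        # A manifest entry looks like '0100644 0 0 /etc/issue',
+        # optionally preceded by 'template' and/or 'overwrite'.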
+ m = re.match('(template )?(overwrite )?'
+ '([0-7]+) ([0-9]+) ([0-9]+) (\S+)', entry)
+
+ if m:
+ template = m.group(1)
+ overwrite = m.group(2)
+ mode = int(m.group(3), 8) # mode is octal
+ uid = int(m.group(4))
+ gid = int(m.group(5))
+ path = m.group(6)
+ else:
+ raise writeexts.ExtensionError(
+ 'Invalid manifest entry, '
+ 'format: [template] [overwrite] '
+ '<octal mode> <uid decimal> <gid decimal> <filename>')
+
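+        # Joining with './' + path keeps absolute manifest paths inside
+        # target_root; os.path.join() would otherwise discard target_root
+        # when given an absolute second argument.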
+ dest_path = os.path.join(target_root, './' + path)
+        if stat.S_ISDIR(mode):
+            # With 'overwrite', or if the directory does not exist yet,
+            # (re)create it; otherwise it must match the manifest entry.
+            if os.path.exists(dest_path) and not overwrite:
+                dest_stat = os.stat(dest_path)
+                if (mode != dest_stat.st_mode
+                    or uid != dest_stat.st_uid
+                    or gid != dest_stat.st_gid):
+                    raise writeexts.ExtensionError(
+                        '"%s" exists and is not identical to directory '
+                        '"%s"' % (dest_path, entry))
+            else:
+                if not os.path.isdir(dest_path):
+                    os.mkdir(dest_path, mode)
+                os.chown(dest_path, uid, gid)
+                os.chmod(dest_path, mode)
+
+ elif stat.S_ISLNK(mode):
+ if os.path.lexists(dest_path) and not overwrite:
+ raise writeexts.ExtensionError('Symlink already exists at %s'
+ % dest_path)
+ else:
+ linkdest = os.readlink(os.path.join(manifest_root,
+ './' + path))
+ self.force_symlink(linkdest, dest_path)
+ os.lchown(dest_path, uid, gid)
+
+ elif stat.S_ISREG(mode):
+ if os.path.lexists(dest_path) and not overwrite:
+ raise writeexts.ExtensionError('File already exists at %s'
+ % dest_path)
+ else:
+ if template:
+ if not jinja_available:
+ raise writeexts.ExtensionError(
+ "Failed to install template file `%s': "
+ 'install-files templates require jinja2'
+ % path)
+
+ loader = jinja2.FileSystemLoader(manifest_root)
+ env = jinja2.Environment(loader=loader,
+ keep_trailing_newline=True)
+
+ env.get_template(path).stream(os.environ).dump(dest_path)
+ else:
+ shutil.copyfile(os.path.join(manifest_root, './' + path),
+ dest_path)
+
+ os.chown(dest_path, uid, gid)
+ os.chmod(dest_path, mode)
+
+ else:
+ raise writeexts.ExtensionError('Mode given in "%s" is not a file,'
+ ' symlink or directory' % entry)
+
+InstallFilesConfigureExtension().run()
diff --git a/old/extensions/install-files.configure.help b/old/extensions/install-files.configure.help
new file mode 100644
index 00000000..191e1378
--- /dev/null
+++ b/old/extensions/install-files.configure.help
@@ -0,0 +1,86 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+ Install a set of files onto a system
+
+ To use this extension you create a directory of files you want to install
+ onto the target system.
+
+ In this example we want to copy some ssh keys onto a system
+
+ % mkdir sshkeyfiles
+ % mkdir -p sshkeyfiles/root/.ssh
+ % cp id_rsa sshkeyfiles/root/.ssh
+ % cp id_rsa.pub sshkeyfiles/root/.ssh
+
+ Now we need to create a manifest file to set the file modes
+  and permissions. The manifest file should be created inside the
+ directory that contains the files we're trying to install.
+
+ cat << EOF > sshkeyfiles/manifest
+ 0040755 0 0 /root/.ssh
+ 0100600 0 0 /root/.ssh/id_rsa
+ 0100644 0 0 /root/.ssh/id_rsa.pub
+ EOF
+
+  Then we add the path to our manifest to our cluster morph;
+  this path should be relative to the system definitions repository.
+
+    INSTALL_FILES: sshkeyfiles/manifest
+
+ All variables starting INSTALL_FILES are considered, and are processed in
+ alphabetical order, so if INSTALL_FILES, INSTALL_FILES_distbuild and
+ INSTALL_FILES_openstack are given, manifests in INSTALL_FILES are processed
+ before those in INSTALL_FILES_distbuild, followed by INSTALL_FILES_openstack.
+
+ Multiple manifest files may be given in the same INSTALL_FILES variable,
+ by providing a whitespace separated list.
+
+  Shell word splitting is supported, so if a manifest's path contains
+  spaces, the path may be shell escaped.
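+
+  For example, two manifests may be given, the second with a space in
+  its path (hypothetical paths):
+
+    INSTALL_FILES: sshkeyfiles/manifest 'config files/manifest'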
+
+
+  More generally, entries in the manifest are formatted as:
+  [template] [overwrite] <octal mode> <uid decimal> <gid decimal> <filename>
+
+  NOTE: Directories are not created automatically on the target; add
+  manifest entries for any directories that do not already exist.
+
+ The extension supports files, symlinks and directories.
+
+ For example,
+
+ 0100644 0 0 /etc/issue
+
+ creates a regular file at /etc/issue with 644 permissions,
+ uid 0 and gid 0, if the file doesn't already exist.
+
+ overwrite 0100644 0 0 /etc/issue
+
+ creates a regular file at /etc/issue with 644 permissions,
+ uid 0 and gid 0, if the file already exists it is overwritten.
+
+ 0100755 0 0 /usr/bin/foo
+
+ creates an executable file at /usr/bin/foo
+
+ 0040755 0 0 /etc/foodir
+
+ creates a directory with 755 permissions
+
+ 0120000 0 0 /usr/bin/bar
+
+ creates a symlink at /usr/bin/bar
+
+  NOTE: The symlink itself must also exist in the manifest directory;
+  its target is read from there and recreated on the target system.
diff --git a/old/extensions/installer.configure b/old/extensions/installer.configure
new file mode 100755
index 00000000..995038ac
--- /dev/null
+++ b/old/extensions/installer.configure
@@ -0,0 +1,48 @@
+#!/usr/bin/python2
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configuration extension to configure an installer
+# system. It will create the configuration needed in the installer system
+# to perform an installation. It uses the following variables from the
+# environment:
+#
+# * INSTALLER_TARGET_STORAGE_DEVICE
+# * INSTALLER_ROOTFS_TO_INSTALL
+# * INSTALLER_POST_INSTALL_COMMAND (optional, defaults to `reboot -f`)
+
+import os
+import sys
+import yaml
+
+install_config_file = os.path.join(sys.argv[1], 'etc', 'install.conf')
+
+try:
+ installer_configuration = {
+ 'INSTALLER_TARGET_STORAGE_DEVICE': os.environ['INSTALLER_TARGET_STORAGE_DEVICE'],
+ 'INSTALLER_ROOTFS_TO_INSTALL': os.environ['INSTALLER_ROOTFS_TO_INSTALL'],
+ }
+except KeyError:
+ print "Not configuring as an installer system"
+ sys.exit(0)
+
+postinstkey = 'INSTALLER_POST_INSTALL_COMMAND'
+installer_configuration[postinstkey] = os.environ.get(postinstkey, 'reboot -f')
+
+with open(install_config_file, 'w') as f:
+    f.write(yaml.dump(installer_configuration, default_flow_style=False))
+
+print "Installer system configuration written to %s" % install_config_file
diff --git a/old/extensions/jffs2.write b/old/extensions/jffs2.write
new file mode 100644
index 00000000..8ff918df
--- /dev/null
+++ b/old/extensions/jffs2.write
@@ -0,0 +1,64 @@
+#!/usr/bin/python2
+#-*- coding: utf-8 -*-
+# Copyright © 2015 Codethink Limited
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''A Morph deployment write extension for creating images with jffs2
+ as the root filesystem.'''
+
+
+import os
+import subprocess
+
+import writeexts
+
+
+class Jffs2WriteExtension(writeexts.WriteExtension):
+
+ '''See jffs2.write.help for documentation.'''
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise writeexts.ExtensionError('Wrong number of command line args')
+
+ temp_root, location = args
+
+ try:
+ self.create_jffs2_system(temp_root, location)
+ self.status(msg='Disk image has been created at %(location)s',
+ location=location)
+ except Exception:
+ self.status(msg='Failure to deploy system to %(location)s',
+ location=location)
+ raise
+
+ def create_jffs2_system(self, temp_root, location):
+ erase_block = self.get_erase_block_size()
+ subprocess.check_call(
+ ['mkfs.jffs2', '--pad', '--no-cleanmarkers',
+ '--eraseblock='+erase_block, '-d', temp_root, '-o', location])
+
+ def get_erase_block_size(self):
+ erase_block = os.environ.get('ERASE_BLOCK', '')
+
+ if erase_block == '':
+ raise writeexts.ExtensionError('ERASE_BLOCK was not given')
+
+ if not erase_block.isdigit():
+ raise writeexts.ExtensionError('ERASE_BLOCK must be a whole number')
+
+ return erase_block
+
+Jffs2WriteExtension().run()
diff --git a/old/extensions/jffs2.write.help b/old/extensions/jffs2.write.help
new file mode 100644
index 00000000..059a354b
--- /dev/null
+++ b/old/extensions/jffs2.write.help
@@ -0,0 +1,28 @@
+#-*- coding: utf-8 -*-
+# Copyright © 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+  Writes a system produced by a Morph build to an image with a jffs2
+  root filesystem. To use this extension, the host system must have
+  access to mkfs.jffs2, which is provided in the mtd-utilities.morph
+  stratum.
+
+ Parameters:
+
+ * location: the pathname of the disk image to be created/upgraded, or the
+ path to the physical device.
+
+  * ERASE_BLOCK: the erase block size (in bytes) of the target system,
+    which can be found in '/sys/class/mtd/mtdX/erasesize'
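+
+  A minimal cluster deployment entry might look like this (values are
+  illustrative):
+
+      type: jffs2
+      location: jffs2-system.img
+      ERASE_BLOCK: 131072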
diff --git a/old/extensions/kvm.check b/old/extensions/kvm.check
new file mode 100755
index 00000000..9ed439dc
--- /dev/null
+++ b/old/extensions/kvm.check
@@ -0,0 +1,171 @@
+#!/usr/bin/python2
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''Preparatory checks for Morph 'kvm' write extension'''
+
+import os
+import re
+import urlparse
+
+import writeexts
+
+
+class KvmPlusSshCheckExtension(writeexts.WriteExtension):
+
+ location_pattern = '^/(?P<guest>[^/]+)(?P<path>/.+)$'
+
+ def process_args(self, args):
+ if len(args) != 1:
+ raise writeexts.ExtensionError(
+ 'Wrong number of command line args')
+
+ self.require_btrfs_in_deployment_host_kernel()
+
+ upgrade = self.get_environment_boolean('UPGRADE')
+ if upgrade:
+ raise writeexts.ExtensionError(
+ 'Use the `ssh-rsync` write extension to deploy upgrades to an '
+ 'existing remote system.')
+
+ location = args[0]
+ ssh_host, vm_name, vm_path = self.check_and_parse_location(location)
+
+ self.check_ssh_connectivity(ssh_host)
+ self.check_can_create_file_at_given_path(ssh_host, vm_path)
+ self.check_no_existing_libvirt_vm(ssh_host, vm_name)
+ self.check_extra_disks_exist(ssh_host, self.parse_attach_disks())
+ self.check_virtual_networks_are_started(ssh_host)
+ self.check_host_has_virtinstall(ssh_host)
+
+ def check_and_parse_location(self, location):
+ '''Check and parse the location argument to get relevant data.'''
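+        # e.g. 'kvm+ssh://alice@192.168.122.1/myvm/var/myvm.img' yields
+        # ('alice@192.168.122.1', 'myvm', '/var/myvm.img')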
+
+ x = urlparse.urlparse(location)
+
+ if x.scheme != 'kvm+ssh':
+ raise writeexts.ExtensionError(
+                'URL scheme must be kvm+ssh in %s' % location)
+
+ m = re.match(self.location_pattern, x.path)
+ if not m:
+ raise writeexts.ExtensionError(
+ 'Cannot parse location %s' % location)
+
+ return x.netloc, m.group('guest'), m.group('path')
+
+ def check_no_existing_libvirt_vm(self, ssh_host, vm_name):
+ try:
+ writeexts.ssh_runcmd(ssh_host,
+ ['virsh', '--connect', 'qemu:///system', 'domstate', vm_name])
+ except writeexts.ExtensionError as e:
+ pass
+ else:
+ raise writeexts.ExtensionError(
+ 'Host %s already has a VM named %s. You can use the ssh-rsync '
+ 'write extension to deploy upgrades to existing machines.' %
+ (ssh_host, vm_name))
+
+ def check_can_create_file_at_given_path(self, ssh_host, vm_path):
+
+ def check_can_write_to_given_path():
+ try:
+ writeexts.ssh_runcmd(ssh_host, ['touch', vm_path])
+ except writeexts.ExtensionError as e:
+ raise writeexts.ExtensionError(
+ "Can't write to location %s on %s" % (vm_path, ssh_host))
+ else:
+ writeexts.ssh_runcmd(ssh_host, ['rm', vm_path])
+
+ try:
+ writeexts.ssh_runcmd(ssh_host, ['test', '-e', vm_path])
+ except writeexts.ExtensionError as e:
+ # vm_path doesn't already exist, so let's test we can write
+ check_can_write_to_given_path()
+ else:
+ raise writeexts.ExtensionError('%s already exists on %s'
+ % (vm_path, ssh_host))
+
+ def check_extra_disks_exist(self, ssh_host, filename_list):
+ for filename in filename_list:
+ try:
+ writeexts.ssh_runcmd(ssh_host, ['ls', filename])
+ except writeexts.ExtensionError as e:
+ raise writeexts.ExtensionError(
+ 'Did not find file %s on host %s' % (filename, ssh_host))
+
+ def check_virtual_networks_are_started(self, ssh_host):
+
+ def check_virtual_network_is_started(network_name):
+ cmd = ['virsh', '-c', 'qemu:///system', 'net-info', network_name]
+ net_info = writeexts.ssh_runcmd(ssh_host, cmd).split('\n')
+
+ def pretty_concat(lines):
+ return '\n'.join(['\t%s' % line for line in lines])
+
+ for line in net_info:
+ m = re.match('^Active:\W*(\w+)\W*', line)
+ if m:
+ break
+ else:
+ raise writeexts.ExtensionError(
+ "Got unexpected output parsing output of `%s':\n%s"
+ % (' '.join(cmd), pretty_concat(net_info)))
+
+ network_active = m.group(1) == 'yes'
+
+ if not network_active:
+ raise writeexts.ExtensionError("Network '%s' is not started"
+ % network_name)
+
+        def name(nic_entry):
+            # Take everything before the first ',' and then drop the
+            # 'network=' prefix; str.lstrip() strips a *set of
+            # characters*, which would mangle names like 'work-net'.
+            if ',' in nic_entry:
+                # network=NETWORK_NAME,mac=12:34,model=e1000...
+                nic_entry = nic_entry[:nic_entry.find(',')]
+            return nic_entry[len('network='):]  # NETWORK_NAME
+
+ if 'NIC_CONFIG' in os.environ:
+ nics = os.environ['NIC_CONFIG'].split()
+
+ for n in nics:
+ if not (n.startswith('network=')
+ or n.startswith('bridge=')
+ or n == 'user'):
+ raise writeexts.ExtensionError(
+ "malformed NIC_CONFIG: %s\n"
+ " (expected 'bridge=BRIDGE' 'network=NAME'"
+ " or 'user')" % n)
+
+ # --network bridge= is used to specify a bridge
+ # --network user is used to specify a form of NAT
+ # (see the virt-install(1) man page)
+ networks = [name(n) for n in nics if not n.startswith('bridge=')
+ and not n.startswith('user')]
+ else:
+ networks = ['default']
+
+ for network in networks:
+ check_virtual_network_is_started(network)
+
+ def check_host_has_virtinstall(self, ssh_host):
+ try:
+ writeexts.ssh_runcmd(ssh_host, ['which', 'virt-install'])
+ except writeexts.ExtensionError:
+ raise writeexts.ExtensionError(
+ 'virt-install does not seem to be installed on host %s'
+ % ssh_host)
+
+
+KvmPlusSshCheckExtension().run()
diff --git a/old/extensions/kvm.write b/old/extensions/kvm.write
new file mode 100755
index 00000000..d29f52e2
--- /dev/null
+++ b/old/extensions/kvm.write
@@ -0,0 +1,126 @@
+#!/usr/bin/python2
+# Copyright (C) 2012-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''A Morph deployment write extension for deploying to KVM+libvirt.
+
+See file kvm.write.help for documentation
+
+'''
+
+
+import os
+import re
+import subprocess
+import sys
+import tempfile
+import urlparse
+
+import writeexts
+
+
+class KvmPlusSshWriteExtension(writeexts.WriteExtension):
+
+ location_pattern = '^/(?P<guest>[^/]+)(?P<path>/.+)$'
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise writeexts.ExtensionError(
+ 'Wrong number of command line args')
+
+ temp_root, location = args
+ ssh_host, vm_name, vm_path = self.parse_location(location)
+ autostart = self.get_environment_boolean('AUTOSTART')
+
+ fd, raw_disk = tempfile.mkstemp()
+ os.close(fd)
+ self.create_local_system(temp_root, raw_disk)
+
+ try:
+ self.transfer(raw_disk, ssh_host, vm_path)
+ self.create_libvirt_guest(ssh_host, vm_name, vm_path, autostart)
+ except BaseException:
+            sys.stderr.write('Error deploying to libvirt\n')
+ os.remove(raw_disk)
+ writeexts.ssh_runcmd(ssh_host, ['rm', '-f', vm_path])
+ raise
+ else:
+ os.remove(raw_disk)
+
+ self.status(
+ msg='Virtual machine %(vm_name)s has been created',
+ vm_name=vm_name)
+
+ def parse_location(self, location):
+ '''Parse the location argument to get relevant data.'''
+
+ x = urlparse.urlparse(location)
+        m = re.match(self.location_pattern, x.path)
+ return x.netloc, m.group('guest'), m.group('path')
+
+ def transfer(self, raw_disk, ssh_host, vm_path):
+ '''Transfer raw disk image to libvirt host.'''
+
+ self.status(msg='Transferring disk image')
+
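+        # xfer-hole streams the raw disk as data/hole records and
+        # recv-hole reconstructs it remotely, so holes in the sparse
+        # image are recreated rather than sent as literal zeroes.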
+ xfer_hole_path = writeexts.get_data_path('xfer-hole')
+ recv_hole = writeexts.get_data('recv-hole')
+
+ ssh_remote_cmd = [
+ 'sh', '-c', recv_hole, 'dummy-argv0', 'file', vm_path
+ ]
+
+ xfer_hole_proc = subprocess.Popen(
+ ['python', xfer_hole_path, raw_disk],
+ stdout=subprocess.PIPE)
+ recv_hole_proc = subprocess.Popen(
+ ['ssh', ssh_host] + map(writeexts.shell_quote, ssh_remote_cmd),
+ stdin=xfer_hole_proc.stdout)
+ xfer_hole_proc.stdout.close()
+ recv_hole_proc.communicate()
+
+ def create_libvirt_guest(self, ssh_host, vm_name, vm_path, autostart):
+ '''Create the libvirt virtual machine.'''
+
+ self.status(msg='Creating libvirt/kvm virtual machine')
+
+ attach_disks = self.parse_attach_disks()
+ attach_opts = []
+ for disk in attach_disks:
+ attach_opts.extend(['--disk', 'path=%s' % disk])
+
+ if 'NIC_CONFIG' in os.environ:
+ nics = os.environ['NIC_CONFIG'].split()
+ for nic in nics:
+ attach_opts.extend(['--network', nic])
+
+ ram_mebibytes = str(self.get_ram_size() / (1024**2))
+
+ vcpu_count = str(self.get_vcpu_count())
+
+ cmdline = ['virt-install', '--connect', 'qemu:///system',
+ '--import', '--name', vm_name, '--vnc',
+ '--ram', ram_mebibytes, '--vcpus', vcpu_count,
+ '--disk', 'path=%s,bus=ide' % vm_path] + attach_opts
+ if not autostart:
+ cmdline += ['--noreboot']
+ writeexts.ssh_runcmd(ssh_host, cmdline)
+
+ if autostart:
+ writeexts.ssh_runcmd(ssh_host,
+ ['virsh', '--connect', 'qemu:///system',
+ 'autostart', vm_name])
+
+KvmPlusSshWriteExtension().run()
diff --git a/old/extensions/kvm.write.help b/old/extensions/kvm.write.help
new file mode 100644
index 00000000..812a5309
--- /dev/null
+++ b/old/extensions/kvm.write.help
@@ -0,0 +1,90 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+ Deploy a Baserock system as a *new* KVM/LibVirt virtual machine.
+
+  Use the `ssh-rsync` write extension to deploy upgrades to an *existing* VM.
+
+ Parameters:
+
+ * location: a custom URL scheme of the form `kvm+ssh://HOST/GUEST/PATH`,
+ where:
+ * HOST is the name of the host on which KVM/LibVirt is running
+ * GUEST is the name of the guest VM on that host
+ * PATH is the path to the disk image that should be created,
+ on that host. For example,
+ `kvm+ssh://alice@192.168.122.1/testsys/home/alice/testys.img` where
+ * `alice@192.168.122.1` is the target host as given to ssh,
+ **from within the development host** (which may be
+ different from the target host's normal address);
+    * `testsys` is the name of the new guest VM;
+    * `/home/alice/testys.img` is the pathname of the disk image file
+      on the target host.
+
+ * HOSTNAME=name: the hostname of the **guest** VM within the network into
+ which it is being deployed
+
+ * DISK_SIZE=X: the size of the VM's primary virtual hard disk. `X` should
+ use a suffix of `K`, `M`, or `G` (in upper or lower case) to indicate
+ kilo-, mega-, or gigabytes. For example, `DISK_SIZE=100G` would create a
+ 100 gigabyte disk image. **This parameter is mandatory**.
+
+  * RAM_SIZE=X: The amount of RAM that the virtual machine should allocate
+    for itself from the host. `X` is interpreted in the same way as for
+    `DISK_SIZE`, and defaults to `1G`.
+
+ * VCPUS=n: the number of virtual CPUs for the VM. Allowed values 1-32. Do
+ not use more CPU cores than you have available physically (real cores, no
+    hyperthreads).
+
+ * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to
+ tell Linux to use, rather than booting the rootfs directly.
+
+  * AUTOSTART=<VALUE>: boolean. If set, the VM will be started once it
+    has been deployed.
+
+  * DTB_PATH=path: **(MANDATORY)** for systems that require a device tree
+    binary - give the full path (without a leading /) to the location of the
+    DTB in the built system image. The deployment will fail if `path` does
+    not exist.
+
+ * BOOTLOADER_INSTALL=value: the bootloader to be installed
+ **(MANDATORY)** for non-x86 systems
+
+ allowed values =
+ - 'extlinux' (default) - the extlinux bootloader will
+ be installed
+ - 'none' - no bootloader will be installed by `morph deploy`. A
+ bootloader must be installed manually. This value must be used when
+ deploying non-x86 systems such as ARM.
+
+ * BOOTLOADER_CONFIG_FORMAT=value: the bootloader format to be used.
+ If not specified for x86-32 and x86-64 systems, 'extlinux' will be used
+
+ allowed values =
+ - 'extlinux'
+
+ * KERNEL_ARGS=args: optional additional kernel command-line parameters to
+ be appended to the default set. The default set is:
+
+ 'rw init=/sbin/init rootfstype=btrfs \
+ rootflags=subvol=systems/default/run \
+ root=[name or UUID of root filesystem]'
+
+ (See https://www.kernel.org/doc/Documentation/kernel-parameters.txt)
+
+ (See `morph help deploy` for details of how to pass parameters to write
+ extensions)
diff --git a/old/extensions/mason.configure b/old/extensions/mason.configure
new file mode 100644
index 00000000..40fdfe46
--- /dev/null
+++ b/old/extensions/mason.configure
@@ -0,0 +1,153 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configuration extension to fully configure
+# a Mason instance at deployment time. It uses the following variables
+# from the environment:
+#
+# * ARTIFACT_CACHE_SERVER
+# * MASON_CLUSTER_MORPHOLOGY
+# * MASON_DEFINITIONS_REF
+# * MASON_DISTBUILD_ARCH
+# * MASON_TEST_HOST
+# * OPENSTACK_NETWORK_ID
+# * TEST_INFRASTRUCTURE_TYPE
+# * TROVE_HOST
+# * TROVE_ID
+# * CONTROLLERHOST
+
+set -e
+
+##########################################################################
+# Copy Mason files into root filesystem
+##########################################################################
+
+ROOT="$1"
+
+mkdir -p "$ROOT"/usr/lib/mason
+cp extensions/mason/mason.sh "$ROOT"/usr/lib/mason/mason.sh
+cp extensions/mason/mason-report.sh "$ROOT"/usr/lib/mason/mason-report.sh
+cp extensions/mason/os-init-script "$ROOT"/usr/lib/mason/os-init-script
+
+cp extensions/mason/mason.timer "$ROOT"/etc/systemd/system/mason.timer
+
+cp extensions/mason/mason.service "$ROOT"/etc/systemd/system/mason.service
+
+##########################################################################
+# Set up httpd web server
+##########################################################################
+
+cp extensions/mason/httpd.service "$ROOT"/etc/systemd/system/httpd.service
+
+mkdir -p "$ROOT"/srv/mason
+
+cat >>"$ROOT"/etc/httpd.conf <<EOF
+.log:text/plain
+EOF
+
+mkdir -p "$ROOT"/var/mason
+
+##########################################################################
+# Copy files needed for Ansible configuration
+##########################################################################
+
+mkdir -p "$ROOT/usr/share/mason-setup"
+mkdir -p "$ROOT/usr/lib/mason-setup"
+
+cp extensions/mason/share/* "$ROOT/usr/share/mason-setup"
+cp -r extensions/mason/ansible "$ROOT/usr/lib/mason-setup/"
+cp extensions/mason/mason-setup.service "$ROOT"/etc/systemd/system/mason-setup.service
+
+ln -s ../mason-setup.service "$ROOT"/etc/systemd/system/multi-user.target.wants/mason-setup.service
+
+##########################################################################
+# Check variables
+##########################################################################
+
+if [ -n "$MASON_GENERIC" ]; then
+ echo Not configuring Mason, it will be generic
+ exit 0
+fi
+
+if [ -z "$MASON_CLUSTER_MORPHOLOGY" -a \
+ -z "$MASON_DEFINITIONS_REF" -a \
+ -z "$MASON_DISTBUILD_ARCH" -a \
+ -z "$MASON_TEST_HOST" ]; then
+ # No Mason options defined, do nothing.
+ exit 0
+fi
+
+if [ -z "$ARTIFACT_CACHE_SERVER" -o \
+ -z "$CONTROLLERHOST" -o \
+ -z "$MASON_CLUSTER_MORPHOLOGY" -o \
+ -z "$MASON_DEFINITIONS_REF" -o \
+ -z "$MASON_DISTBUILD_ARCH" -o \
+ -z "$MASON_TEST_HOST" -o \
+ -z "$TROVE_HOST" -o \
+ -z "$TROVE_ID" ]; then
+ echo Some options required for Mason were defined, but not all.
+ exit 1
+fi
+
+##########################################################################
+# Generate config variable shell snippet
+##########################################################################
+
+MASON_DATA="$ROOT/etc/mason"
+mkdir -p "$MASON_DATA"
+
+python <<'EOF' >"$MASON_DATA/mason.conf"
+import os, sys, yaml
+
+mason_configuration={
+ 'ARTIFACT_CACHE_SERVER': os.environ['ARTIFACT_CACHE_SERVER'],
+ 'MASON_CLUSTER_MORPHOLOGY': os.environ['MASON_CLUSTER_MORPHOLOGY'],
+ 'MASON_DEFINITIONS_REF': os.environ['MASON_DEFINITIONS_REF'],
+ 'MASON_DISTBUILD_ARCH': os.environ['MASON_DISTBUILD_ARCH'],
+ 'MASON_TEST_HOST': os.environ['MASON_TEST_HOST'],
+ 'OPENSTACK_NETWORK_ID': os.environ['OPENSTACK_NETWORK_ID'],
+ 'TEST_INFRASTRUCTURE_TYPE': os.environ['TEST_INFRASTRUCTURE_TYPE'],
+ 'TROVE_ID': os.environ['TROVE_ID'],
+ 'TROVE_HOST': os.environ['TROVE_HOST'],
+ 'CONTROLLERHOST': os.environ['CONTROLLERHOST'],
+}
+
+yaml.dump(mason_configuration, sys.stdout, default_flow_style=False)
+EOF
+
+if [ "$TEST_INFRASTRUCTURE_TYPE" = "openstack" ]; then
+ python <<'EOF' >>"$MASON_DATA/mason.conf"
+import os, sys, yaml
+
+openstack_credentials={
+ 'OS_USERNAME': os.environ['OPENSTACK_USER'],
+ 'OS_TENANT_NAME': os.environ['OPENSTACK_TENANT'],
+ 'OS_TENANT_ID': os.environ['OPENSTACK_TENANT_ID'],
+ 'OS_AUTH_URL': os.environ['OPENSTACK_AUTH_URL'],
+ 'OS_PASSWORD': os.environ['OPENSTACK_PASSWORD'],
+}
+
+yaml.dump(openstack_credentials, sys.stdout, default_flow_style=False)
+EOF
+fi
+
+##########################################################################
+# Enable services
+##########################################################################
+
+ln -s ../mason.timer "$ROOT"/etc/systemd/system/multi-user.target.wants/mason.timer
+ln -s ../httpd.service "$ROOT"/etc/systemd/system/multi-user.target.wants/httpd.service
diff --git a/old/extensions/mason/ansible/hosts b/old/extensions/mason/ansible/hosts
new file mode 100644
index 00000000..5b97818d
--- /dev/null
+++ b/old/extensions/mason/ansible/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local
diff --git a/old/extensions/mason/ansible/mason-setup.yml b/old/extensions/mason/ansible/mason-setup.yml
new file mode 100644
index 00000000..d1528dbb
--- /dev/null
+++ b/old/extensions/mason/ansible/mason-setup.yml
@@ -0,0 +1,83 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/mason/mason.conf"
+ tasks:
+
+
+ - fail: msg='TROVE_ID is mandatory'
+ when: TROVE_ID is not defined
+
+ - fail: msg='TROVE_HOST is mandatory'
+ when: TROVE_HOST is not defined
+
+ - fail: msg='ARTIFACT_CACHE_SERVER is mandatory'
+ when: ARTIFACT_CACHE_SERVER is not defined
+
+ - fail: msg='MASON_CLUSTER_MORPHOLOGY is mandatory'
+ when: MASON_CLUSTER_MORPHOLOGY is not defined
+
+ - fail: msg='MASON_DEFINITIONS_REF is mandatory'
+ when: MASON_DEFINITIONS_REF is not defined
+
+ - fail: msg='MASON_DISTBUILD_ARCH is mandatory'
+ when: MASON_DISTBUILD_ARCH is not defined
+
+ - fail: msg='MASON_TEST_HOST is mandatory'
+ when: MASON_TEST_HOST is not defined
+
+ - fail: msg='CONTROLLERHOST is mandatory'
+ when: CONTROLLERHOST is not defined
+
+ - fail: msg='TEST_INFRASTRUCTURE_TYPE is mandatory'
+ when: TEST_INFRASTRUCTURE_TYPE is not defined
+
+ - fail: msg='OPENSTACK_NETWORK_ID is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OPENSTACK_NETWORK_ID is not defined
+
+ - fail: msg='OS_USERNAME is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_USERNAME is not defined
+
+ - fail: msg='OS_PASSWORD is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_PASSWORD is not defined
+
+ - fail: msg='OS_TENANT_ID is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_TENANT_ID is not defined
+
+ - fail: msg='OS_TENANT_NAME is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_TENANT_NAME is not defined
+
+ - fail: msg='OS_AUTH_URL is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack'
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_AUTH_URL is not defined
+
+ - name: Create the Mason configuration file
+ template: src=/usr/share/mason-setup/{{ item }} dest=/etc/{{ item }}
+ with_items:
+ - mason.conf
+
+ - name: Create the OpenStack credentials file
+ template: src=/usr/share/mason-setup/{{ item }} dest=/etc/{{ item }}
+ with_items:
+ - os.conf
+ when: TEST_INFRASTRUCTURE_TYPE == "openstack"
+
+ - name: Enable the mason service
+ service: name=mason.service enabled=yes
+ register: mason_service
+ - name: Restart the mason service
+ service: name=mason.service state=restarted
+ when: mason_service|changed
+
+ - name: Enable the mason timer
+ service: name=mason.timer enabled=yes
+ register: mason_timer
+ - name: Restart the mason timer
+ service: name=mason.timer state=restarted
+ when: mason_timer|changed
+
+ - name: Enable the httpd service
+ service: name=httpd.service enabled=yes
+ register: httpd_service
+ - name: Restart the httpd service
+    service: name=httpd.service state=restarted
+ when: httpd_service|changed
diff --git a/old/extensions/mason/httpd.service b/old/extensions/mason/httpd.service
new file mode 100644
index 00000000..7572b732
--- /dev/null
+++ b/old/extensions/mason/httpd.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=HTTP server for Mason
+After=network.target
+
+[Service]
+User=root
+ExecStart=/usr/sbin/httpd -f -p 80 -h /srv/mason
+
+[Install]
+WantedBy=multi-user.target
diff --git a/old/extensions/mason/mason-generator.sh b/old/extensions/mason/mason-generator.sh
new file mode 100755
index 00000000..187db72c
--- /dev/null
+++ b/old/extensions/mason/mason-generator.sh
@@ -0,0 +1,101 @@
+#!/bin/sh
+
+set -e
+
+if [ "$#" -lt 5 -o "$#" -gt 6 -o "$1" = "-h" -o "$1" = "--help" ]; then
+ cat <<EOF
+Usage:
+ `basename $0` HOST_PREFIX UPSTREAM_TROVE_HOSTNAME VM_USER VM_HOST VM_PATH [HOST_POSTFIX]
+
+Where:
+ HOST_PREFIX -- Name of your Mason instance
+ e.g. "my-mason" to produce hostnames:
+ my-mason-trove and my-mason-controller
+ UPSTREAM_TROVE_HOSTNAME -- Upstream trove's hostname
+ VM_USER -- User on VM host for VM deployment
+ VM_HOST -- VM host for VM deployment
+ VM_PATH -- Path to store VM images in on VM host
+ HOST_POSTFIX -- e.g. ".example.com" to get
+ my-mason-trove.example.com
+
+This script makes deploying a Mason system simpler by automating
+the generation of keys for the systems to use, building of the
+systems, filling out the mason deployment cluster morphology
+template with useful values, and finally deploying the systems.
+
+To ensure that the deployed system can deploy test systems, you
+must supply an ssh key to the VM host. Do so with the following
+command:
+ ssh-copy-id -i ssh_keys-HOST_PREFIX/worker.key.pub VM_USER@VM_HOST
+
+To ensure that the mason can upload artifacts to the upstream trove,
+you must supply an ssh key to the upstream trove. Do so with the
+following command:
+ ssh-copy-id -i ssh_keys-HOST_PREFIX/id_rsa.key.pub root@UPSTREAM_TROVE_HOSTNAME
+
+EOF
+ exit 0
+fi
+
+
+HOST_PREFIX="$1"
+UPSTREAM_TROVE="$2"
+VM_USER="$3"
+VM_HOST="$4"
+VM_PATH="$5"
+HOST_POSTFIX="$6"
+
+sedescape() {
+ # Escape all non-alphanumeric characters
+ printf "%s\n" "$1" | sed -e 's/\W/\\&/g'
+}
+
+
+##############################################################################
+# Key generation
+##############################################################################
+
+mkdir -p "ssh_keys-${HOST_PREFIX}"
+cd "ssh_keys-${HOST_PREFIX}"
+test -e mason.key || ssh-keygen -t rsa -b 2048 -f mason.key -C mason@TROVE_HOST -N ''
+test -e lorry.key || ssh-keygen -t rsa -b 2048 -f lorry.key -C lorry@TROVE_HOST -N ''
+test -e worker.key || ssh-keygen -t rsa -b 2048 -f worker.key -C worker@TROVE_HOST -N ''
+test -e id_rsa || ssh-keygen -t rsa -b 2048 -f id_rsa -C trove-admin@TROVE_HOST -N ''
+cd ../
+
+
+##############################################################################
+# Mason setup
+##############################################################################
+
+cp clusters/mason.morph mason-${HOST_PREFIX}.morph
+
+sed -i "s/red-box-v1/$(sedescape "$HOST_PREFIX")/g" "mason-$HOST_PREFIX.morph"
+sed -i "s/ssh_keys/ssh_keys-$(sedescape "$HOST_PREFIX")/g" "mason-$HOST_PREFIX.morph"
+sed -i "s/upstream-trove/$(sedescape "$UPSTREAM_TROVE")/" "mason-$HOST_PREFIX.morph"
+sed -i "s/vm-user/$(sedescape "$VM_USER")/g" "mason-$HOST_PREFIX.morph"
+sed -i "s/vm-host/$(sedescape "$VM_HOST")/g" "mason-$HOST_PREFIX.morph"
+sed -i "s/vm-path/$(sedescape "$VM_PATH")/g" "mason-$HOST_PREFIX.morph"
+sed -i "s/\.example\.com/$(sedescape "$HOST_POSTFIX")/g" "mason-$HOST_PREFIX.morph"
+
+
+##############################################################################
+# System building
+##############################################################################
+
+morph build systems/trove-system-x86_64.morph
+morph build systems/build-system-x86_64.morph
+
+
+##############################################################################
+# System deployment
+##############################################################################
+
+morph deploy mason-${HOST_PREFIX}.morph
+
+
+##############################################################################
+# Cleanup
+##############################################################################
+
+rm mason-${HOST_PREFIX}.morph
diff --git a/old/extensions/mason/mason-report.sh b/old/extensions/mason/mason-report.sh
new file mode 100755
index 00000000..f6cca0ef
--- /dev/null
+++ b/old/extensions/mason/mason-report.sh
@@ -0,0 +1,297 @@
+#!/bin/bash
+
+set -x
+
+. /etc/mason.conf
+
+REPORT_PATH=/var/mason/report.html
+SERVER_PATH=/srv/mason
+SERVER_REPORT_PATH="$SERVER_PATH/index.html"
+
+sed_escape() {
+ printf "%s\n" "$1" | sed -e 's/\W/\\&/g'
+}
+
+create_report() {
+cat > $REPORT_PATH <<'EOF'
+<html>
+<head>
+<meta charset="UTF-8">
+<meta http-equiv="refresh" content="60">
+<style>
+html, body {
+ margin: 0;
+ padding: 0;
+}
+p.branding {
+ background: black;
+ color: #fff;
+ padding: 0.4em;
+ margin: 0;
+ font-weight: bold;
+}
+h1 {
+ background: #225588;
+ color: white;
+ margin: 0;
+ padding: 0.6em;
+}
+table {
+ width: 90%;
+ margin: 1em auto 6em auto;
+ border: 1px solid black;
+ border-spacing: 0;
+}
+table tr.headings {
+ background: #555;
+ color: white;
+}
+table tr.pass {
+ background: #aaffaa;
+}
+table tr.pass:hover {
+ background: #bbffbb;
+}
+table tr.fail {
+ background: #ffaaaa;
+}
+table tr.fail:hover {
+ background: #ffbbbb;
+}
+table tr.nonet {
+ background: #ffdd99;
+}
+table tr.nonet:hover {
+ background: #ffeeaa;
+}
+table tr.progress {
+ background: #00CCFF;
+}
+table tr.progress:hover {
+ background: #91E9FF;
+}
+table tr.headings th {
+ font-weight: bold;
+ text-align: left;
+ padding: 3px 2px;
+}
+table td {
+ padding: 2px;
+}
+td.result {
+ font-weight: bold;
+ text-transform: uppercase;
+}
+td.result a {
+ text-decoration: none;
+}
+td.result a:before {
+ content: "âž« ";
+    content: "➫ ";
+tr.pass td.result a {
+ color: #252;
+}
+tr.pass td.result a:hover {
+ color: #373;
+}
+tr.fail td.result a {
+ color: #622;
+}
+tr.fail td.result a:hover {
+ color: #933;
+}
+tr.nonet td.result a {
+ color: #641;
+}
+tr.nonet td.result a:hover {
+ color: #962;
+}
+tr.progress td.result a {
+ color: #000066;
+}
+tr.progress td.result a:hover {
+ color: #0000CC;
+}
+td.ref {
+ font-family: monospace;
+}
+td.ref a {
+ color: #333;
+}
+td.ref a:hover {
+ color: #555;
+}
+table tr.pass td, table tr.fail td {
+ border-top: solid white 1px;
+}
+p {
+ margin: 1.3em;
+}
+code {
+ padding: 0.3em 0.5em;
+ background: #eee;
+ border: 1px solid #bbb;
+ border-radius: 1em;
+}
+#footer {
+ margin: 0;
+ background: #aaa;
+ color: #222;
+ border-top: #888 1px solid;
+ font-size: 80%;
+ padding: 0;
+ position: fixed;
+ bottom: 0;
+ width: 100%;
+ display: table;
+}
+#footer p {
+ padding: 1.3em;
+ display: table-cell;
+}
+#footer p code {
+ font-size: 110%;
+}
+#footer p.about {
+ text-align: right;
+}
+</style>
+</head>
+<body>
+<p class="branding">Mason</p>
+<h1>Baserock: Continuous Delivery</h1>
+<p>Build log of changes to <code>BRANCH</code> from <code>TROVE</code>. Most recent first.</p>
+<table>
+<tr class="headings">
+ <th>Started</th>
+ <th>Ref</th>
+ <th>Duration</th>
+ <th>Result</th>
+</tr>
+<!--INSERTION POINT-->
+</table>
+<div id="footer">
+<p>Last checked for updates at: <code>....-..-.. ..:..:..</code></p>
+<p class="about">Generated by Mason | Powered by Baserock</p>
+</div>
+</body>
+</html>
+EOF
+
+ sed -i 's/BRANCH/'"$(sed_escape "$1")"'/' $REPORT_PATH
+ sed -i 's/TROVE/'"$(sed_escape "$2")"'/' $REPORT_PATH
+}
+
+update_report() {
+ # Give function params sensible names
+ build_start_time="$1"
+ build_trove_host="$2"
+ build_ref="$3"
+ build_sha1="$4"
+ build_duration="$5"
+ build_result="$6"
+ report_path="$7"
+ build_log="$8"
+
+ # Generate template if report file is not there
+ if [ ! -f $REPORT_PATH ]; then
+ create_report $build_ref $build_trove_host
+ fi
+
+ # Build table row for insertion into report file
+ if [ "$build_result" = nonet ]; then
+        msg='<tr class="'"${build_result}"'"><td>'"${build_start_time}"'</td><td class="ref">Failed to contact '"${build_trove_host}"'</td><td>'"${build_duration}s"'</td><td class="result"><a href="'"${build_log}"'">'"${build_result}"'</a></td></tr>'
+ else
+ msg='<tr class="'"${build_result}"'"><td>'"${build_start_time}"'</td><td class="ref"><a href="http://'"${build_trove_host}"'/cgi-bin/cgit.cgi/baserock/baserock/definitions.git/commit/?h='"${build_ref}"'&id='"${build_sha1}"'">'"${build_sha1}"'</a></td><td>'"${build_duration}s"'</td><td class="result"><a href="'"${build_log}"'">'"${build_result}"'</a></td></tr>'
+ fi
+
+ # Insert report line, newest at top
+ sed -i 's/<!--INSERTION POINT-->/<!--INSERTION POINT-->\n'"$(sed_escape "$msg")"'/' $report_path
+}
+
+update_report_time() {
+ # Give function params sensible names
+ build_start_time="$1"
+
+ # If the report file exists, update the last-checked-for-updates time
+    if [ -f "$REPORT_PATH" ]; then
+ sed -i 's/<code>....-..-.. ..:..:..<\/code>/<code>'"$(sed_escape "$build_start_time")"'<\/code>/' $REPORT_PATH
+ fi
+}
+
+START_TIME="$(date '+%Y-%m-%d %T')"
+
+update_report_time "$START_TIME"
+cp "$REPORT_PATH" "$SERVER_PATH/index.html"
+
+logfile="$(mktemp)"
+
+# Update current.log symlink to point to the current build log
+ln -sf "$logfile" "$SERVER_PATH"/current.log
+
+# Copy current server report, to restore when result is "skip"
+cp "$SERVER_REPORT_PATH" "$SERVER_REPORT_PATH".bak
+
+update_report "$START_TIME" \
+ "$UPSTREAM_TROVE_ADDRESS" \
+ "$DEFINITIONS_REF" \
+ "" \
+ " - " \
+ "progress" \
+ "$SERVER_REPORT_PATH" \
+ "current.log"
+
+
+/usr/lib/mason/mason.sh 2>&1 | tee "$logfile"
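+# tee masks mason.sh's exit status, so read it from bash's PIPESTATUS.
+# mason.sh exits 33 when there is nothing new to build and 42 when the
+# trove cannot be contacted (see mason.sh below).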
+case "${PIPESTATUS[0]}" in
+0)
+ RESULT=pass
+ ;;
+33)
+ RESULT=skip
+ ;;
+42)
+ RESULT=nonet
+ ;;
+*)
+ RESULT=fail
+ ;;
+esac
+
+# TODO: Update page with last executed time
+if [ "$RESULT" = skip ]; then
+    # Restore the copied server report, otherwise the 'progress' row would
+    # still be present with a broken link after we remove the $logfile
+ mv "$SERVER_REPORT_PATH".bak "$SERVER_REPORT_PATH"
+
+ rm "$logfile"
+ exit 0
+fi
+
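+# Elapsed build time in seconds, computed by re-parsing START_TIME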
+DURATION=$(( $(date +%s) - $(date --date="$START_TIME" +%s) ))
+SHA1="$(cd "/ws/mason-definitions-$DEFINITIONS_REF" && git rev-parse HEAD)"
+BUILD_LOG="log/${SHA1}--${START_TIME}.log"
+
+update_report "$START_TIME" \
+ "$UPSTREAM_TROVE_ADDRESS" \
+ "$DEFINITIONS_REF" \
+ "$SHA1" \
+ "$DURATION" \
+ "$RESULT" \
+ "$REPORT_PATH" \
+ "$BUILD_LOG"
+
+
+#
+# Copy report into server directory
+#
+
+cp "$REPORT_PATH" "$SERVER_REPORT_PATH"
+mkdir -p "$SERVER_PATH/log"
+mv "$logfile" "$SERVER_PATH/$BUILD_LOG"
+
+# Cleanup
+
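+# Clear the distbuild workspace between runs: everything except the
+# "remove" staging directory and the trees.cache.pickle cache is staged
+# into /srv/distbuild/remove and then deleted.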
+mkdir -p /srv/distbuild/remove
+find /srv/distbuild/ -mindepth 1 -maxdepth 1 -not \( -name "remove" -o -name "trees.cache.pickle" \) -exec mv '{}' /srv/distbuild/remove \;
+find /srv/distbuild/remove -delete
diff --git a/old/extensions/mason/mason-setup.service b/old/extensions/mason/mason-setup.service
new file mode 100644
index 00000000..60403bde
--- /dev/null
+++ b/old/extensions/mason/mason-setup.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Run mason-setup Ansible scripts
+Requires=network.target
+After=network.target
+Requires=opensshd.service
+After=opensshd.service
+
+# If there's a shared /var subvolume, it must be mounted before this
+# unit runs.
+Requires=local-fs.target
+After=local-fs.target
+
+ConditionPathExists=/etc/mason/mason.conf
+
+[Service]
+ExecStart=/usr/bin/ansible-playbook -v -i /usr/lib/mason-setup/ansible/hosts /usr/lib/mason-setup/ansible/mason-setup.yml
diff --git a/old/extensions/mason/mason.service b/old/extensions/mason/mason.service
new file mode 100644
index 00000000..d5c99498
--- /dev/null
+++ b/old/extensions/mason/mason.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Mason: Continuous Delivery Service
+After=mason-setup.service
+ConditionPathIsDirectory=/srv/distbuild
+
+[Service]
+User=root
+ExecStart=/usr/lib/mason/mason-report.sh
+WorkingDirectory=/srv/distbuild
+
+[Install]
+WantedBy=multi-user.target
diff --git a/old/extensions/mason/mason.sh b/old/extensions/mason/mason.sh
new file mode 100755
index 00000000..8b2cea5f
--- /dev/null
+++ b/old/extensions/mason/mason.sh
@@ -0,0 +1,90 @@
+#!/bin/sh
+
+# Load OpenStack credentials
+if [ -f "/etc/os.conf" ]; then
+ . /etc/os.conf
+fi
+
+set -e
+set -x
+
+# Load our deployment config
+. /etc/mason.conf
+
+mkdir -p /ws
+
+definitions_repo=/ws/mason-definitions-"$DEFINITIONS_REF"
+if [ ! -e "$definitions_repo" ]; then
+ git clone -b "$DEFINITIONS_REF" git://"$UPSTREAM_TROVE_ADDRESS"/baserock/baserock/definitions "$definitions_repo"
+ cd "$definitions_repo"
+ git config user.name "$TROVE_ID"-mason
+ git config user.email "$TROVE_ID"-mason@$(hostname)
+else
+ cd "$definitions_repo"
+ SHA1_PREV="$(git rev-parse HEAD)"
+fi
+
+if ! git remote update origin; then
+ echo ERROR: Unable to contact trove
+ exit 42
+fi
+git clean -fxd
+git reset --hard origin/"$DEFINITIONS_REF"
+
+SHA1="$(git rev-parse HEAD)"
+
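+# Skip the build when the previous run succeeded and definitions are
+# unchanged; mason-report.sh maps exit status 33 to a "skip" result.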
+if [ -f "$HOME/success" ] && [ "$SHA1" = "$SHA1_PREV" ]; then
+ echo INFO: No changes to "$DEFINITIONS_REF", nothing to do
+ exit 33
+fi
+
+rm -f "$HOME/success"
+
+echo INFO: Mason building: $DEFINITIONS_REF at $SHA1
+
+if ! "scripts/release-build" --no-default-configs \
+ --trove-host "$UPSTREAM_TROVE_ADDRESS" \
+ --artifact-cache-server "http://$ARTIFACT_CACHE_SERVER:8080/" \
+ --controllers "$DISTBUILD_ARCH:$DISTBUILD_CONTROLLER_ADDRESS" \
+ "$BUILD_CLUSTER_MORPHOLOGY"; then
+ echo ERROR: Failed to build release images
+ echo Build logs for chunks:
+ find build-* -type f -exec echo {} \; -exec cat {} \;
+ exit 1
+fi
+
+releases_made="$(cd release && ls | wc -l)"
+if [ "$releases_made" = 0 ]; then
+ echo ERROR: No release images created
+ exit 1
+else
+ echo INFO: Created "$releases_made" release images
+fi
+
+if [ "$TEST_INFRASTRUCTURE_TYPE" = "openstack" ]; then
+ "scripts/release-test-os" \
+ --deployment-host "$DISTBUILD_ARCH":"$MASON_TEST_HOST" \
+ --trove-host "$UPSTREAM_TROVE_ADDRESS" \
+ --trove-id "$TROVE_ID" \
+ --net-id "$OPENSTACK_NETWORK_ID" \
+ "$BUILD_CLUSTER_MORPHOLOGY"
+elif [ "$TEST_INFRASTRUCTURE_TYPE" = "kvmhost" ]; then
+ "scripts/release-test" \
+ --deployment-host "$DISTBUILD_ARCH":"$MASON_TEST_HOST" \
+ --trove-host "$UPSTREAM_TROVE_ADDRESS" \
+ --trove-id "$TROVE_ID" \
+ "$BUILD_CLUSTER_MORPHOLOGY"
+fi
+
+"scripts/release-upload" --build-trove-host "$ARTIFACT_CACHE_SERVER" \
+ --arch "$DISTBUILD_ARCH" \
+ --log-level=debug --log="$HOME"/release-upload.log \
+ --public-trove-host "$UPSTREAM_TROVE_ADDRESS" \
+ --public-trove-username root \
+ --public-trove-artifact-dir /home/cache/artifacts \
+ --no-upload-release-artifacts \
+ "$BUILD_CLUSTER_MORPHOLOGY"
+
+echo INFO: Artifact upload complete for $DEFINITIONS_REF at $SHA1
+
+touch "$HOME/success"
diff --git a/old/extensions/mason/mason.timer b/old/extensions/mason/mason.timer
new file mode 100644
index 00000000..107dff97
--- /dev/null
+++ b/old/extensions/mason/mason.timer
@@ -0,0 +1,10 @@
+[Unit]
+Description=Runs Mason continually with 1 min between calls
+
+[Timer]
+# Time between Mason finishing and calling it again
+OnUnitActiveSec=1min
+Unit=mason.service
+
+[Install]
+WantedBy=multi-user.target
diff --git a/old/extensions/mason/os-init-script b/old/extensions/mason/os-init-script
new file mode 100644
index 00000000..77afb926
--- /dev/null
+++ b/old/extensions/mason/os-init-script
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+# This allows the test runner to know that cloud-init has completed the
+# disc resizing, and there is enough free space to continue.
+touch /root/cloud-init-finished
+
diff --git a/old/extensions/mason/share/mason.conf b/old/extensions/mason/share/mason.conf
new file mode 100644
index 00000000..1295ce84
--- /dev/null
+++ b/old/extensions/mason/share/mason.conf
@@ -0,0 +1,14 @@
+# This file is generated by the mason-setup systemd unit.
+# If you want to change the configuration, edit
+# /etc/mason/mason.conf and restart the service.
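+#
+# A rendered entry looks like this (hypothetical value, substituted at
+# deploy time): DISTBUILD_ARCH='x86_64'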
+
+ARTIFACT_CACHE_SERVER={{ ARTIFACT_CACHE_SERVER|quote }}
+UPSTREAM_TROVE_ADDRESS={{ TROVE_HOST|quote }}
+DEFINITIONS_REF={{ MASON_DEFINITIONS_REF|quote }}
+DISTBUILD_ARCH={{ MASON_DISTBUILD_ARCH|quote }}
+DISTBUILD_CONTROLLER_ADDRESS={{ CONTROLLERHOST|quote }}
+TROVE_ID={{ TROVE_ID|quote }}
+BUILD_CLUSTER_MORPHOLOGY={{ MASON_CLUSTER_MORPHOLOGY|quote }}
+MASON_TEST_HOST={{ MASON_TEST_HOST|quote }}
+TEST_INFRASTRUCTURE_TYPE={{ TEST_INFRASTRUCTURE_TYPE|quote }}
+{% if OPENSTACK_NETWORK_ID is defined %}OPENSTACK_NETWORK_ID={{ OPENSTACK_NETWORK_ID|quote }}{% endif %}
diff --git a/old/extensions/mason/share/os.conf b/old/extensions/mason/share/os.conf
new file mode 100644
index 00000000..21ef398c
--- /dev/null
+++ b/old/extensions/mason/share/os.conf
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# A version of this file with the relevant information included can be
+# obtained by navigating to 'Access & Security' -> 'API Access' ->
+# 'Download OpenStack RC file' in The Horizon web interface of your
+# OpenStack. However, the file obtained from there sets OS_PASSWORD
+# such that it will ask the user for a password, so you will need to
+# change that for Mason to work automatically.
+#
+# With the addition of Keystone, to use an openstack cloud you should
+# authenticate against keystone, which returns a **Token** and **Service
+# Catalog**. The catalog contains the endpoint for all services the
+# user/tenant has access to - including nova, glance, keystone, swift.
+#
+# *NOTE*: Using the 2.0 *auth api* does not mean that compute api is 2.0. We
+# will use the 1.1 *compute api*
+export OS_AUTH_URL={{ OS_AUTH_URL|quote }}
+
+# With the addition of Keystone we have standardized on the term **tenant**
+# as the entity that owns the resources.
+export OS_TENANT_ID={{ OS_TENANT_ID|quote }}
+export OS_TENANT_NAME={{ OS_TENANT_NAME|quote }}
+
+# In addition to the owning entity (tenant), openstack stores the entity
+# performing the action as the **user**.
+export OS_USERNAME={{ OS_USERNAME|quote }}
+
+# With Keystone you pass the keystone password.
+export OS_PASSWORD={{ OS_PASSWORD|quote }}
+
diff --git a/old/extensions/moonshot-kernel.configure b/old/extensions/moonshot-kernel.configure
new file mode 100644
index 00000000..11d01751
--- /dev/null
+++ b/old/extensions/moonshot-kernel.configure
@@ -0,0 +1,33 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configuration extension to convert a plain
+# kernel Image to uImage, for an HP Moonshot m400 cartridge
+
+set -eu
+
+case "$MOONSHOT_KERNEL" in
+ True|yes)
+ echo "Converting kernel image for Moonshot"
+ mkimage -A arm -O linux -C none -T kernel -a 0x00080000 \
+ -e 0x00080000 -n Linux -d "$1/boot/vmlinux" "$1/boot/uImage"
+ ;;
+ *)
+        echo "Unrecognised value $MOONSHOT_KERNEL for MOONSHOT_KERNEL" >&2
+ exit 1
+ ;;
+esac
diff --git a/old/extensions/nfsboot-server.configure b/old/extensions/nfsboot-server.configure
new file mode 100755
index 00000000..9fb48096
--- /dev/null
+++ b/old/extensions/nfsboot-server.configure
@@ -0,0 +1,58 @@
+#!/bin/sh
+#
+# Copyright (C) 2013-2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+#
+# This is a "morph deploy" configuration extension to set up a server for
+# booting over nfs and tftp.
+set -e
+
+ROOT="$1"
+
+##########################################################################
+
+nfsboot_root=/srv/nfsboot
+tftp_root="$nfsboot_root"/tftp
+nfs_root="$nfsboot_root"/nfs
+mkdir -p "$ROOT$tftp_root" "$ROOT$nfs_root"
+
+install -D /dev/stdin "$ROOT/usr/lib/systemd/system/nfsboot-tftp.service" <<EOF
+[Unit]
+Description=tftp service for booting kernels
+After=network.target
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/udpsvd -E 0 69 /usr/sbin/tftpd $tftp_root
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
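+# Enable the tftp and nfs units at boot, whichever prefix (/lib or
+# /usr/lib) this system installs its unit files under.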
+for prefix in / /usr; do
+ for unit in nfsboot-tftp.service nfs-server.service; do
+ unit_path="${prefix}/lib/systemd/system/$unit"
+ if [ -e "$ROOT/$unit_path" ]; then
+ ln -s "../../../../$unit_path" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$unit"
+ fi
+ done
+done
+
+pxelinux_file="$ROOT/usr/share/syslinux/pxelinux.0"
+if [ -e "$pxelinux_file" ]; then
+ cp "$pxelinux_file" "$ROOT$tftp_root/pxelinux.0"
+fi
diff --git a/old/extensions/nfsboot.check b/old/extensions/nfsboot.check
new file mode 100755
index 00000000..0b2e6be7
--- /dev/null
+++ b/old/extensions/nfsboot.check
@@ -0,0 +1,96 @@
+#!/usr/bin/python2
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''Preparatory checks for Morph 'nfsboot' write extension'''
+
+import os
+
+import writeexts
+
+
+class NFSBootCheckExtension(writeexts.WriteExtension):
+
+ _nfsboot_root = '/srv/nfsboot'
+
+ def process_args(self, args):
+ if len(args) != 1:
+ raise writeexts.ExtensionError(
+ 'Wrong number of command line args')
+
+ location = args[0]
+
+ upgrade = self.get_environment_boolean('UPGRADE')
+ if upgrade:
+ raise writeexts.ExtensionError(
+ 'Upgrading is not currently supported for NFS deployments.')
+
+ hostname = os.environ.get('HOSTNAME', None)
+ if hostname is None:
+ raise writeexts.ExtensionError('You must specify a HOSTNAME.')
+ if hostname == 'baserock':
+ raise writeexts.ExtensionError('It is forbidden to nfsboot a '
+ 'system with hostname "%s"'
+ % hostname)
+
+ self.test_good_server(location)
+
+ version_label = os.getenv('VERSION_LABEL', 'factory')
+ versioned_root = os.path.join(self._nfsboot_root, hostname, 'systems',
+ version_label)
+ if self.version_exists(versioned_root, location):
+ raise writeexts.ExtensionError(
+ 'Root file system for host %s (version %s) already exists on '
+ 'the NFS server %s. Deployment aborted.' % (hostname,
+ version_label, location))
+
+ def test_good_server(self, server):
+ self.check_ssh_connectivity(server)
+
+ # Is an NFS server
+ try:
+ writeexts.ssh_runcmd(
+ 'root@%s' % server, ['test', '-e', '/etc/exports'])
+ except writeexts.ExtensionError:
+ raise writeexts.ExtensionError('server %s is not an nfs server'
+ % server)
+ try:
+ writeexts.ssh_runcmd(
+ 'root@%s' % server, ['systemctl', 'is-enabled',
+ 'nfs-server.service'])
+
+ except writeexts.ExtensionError:
+ raise writeexts.ExtensionError('server %s does not control its '
+ 'nfs server by systemd' % server)
+
+ # TFTP server exports /srv/nfsboot/tftp
+ tftp_root = os.path.join(self._nfsboot_root, 'tftp')
+        try:
+            writeexts.ssh_runcmd(
+                'root@%s' % server, ['test', '-d', tftp_root])
+        except writeexts.ExtensionError:
+            raise writeexts.ExtensionError('server %s does not export %s' %
+                                           (server, tftp_root))
+
+ def version_exists(self, versioned_root, location):
+ try:
+ writeexts.ssh_runcmd('root@%s' % location,
+ ['test', '-d', versioned_root])
+ except writeexts.ExtensionError:
+ return False
+
+ return True
+
+
+NFSBootCheckExtension().run()
diff --git a/old/extensions/nfsboot.configure b/old/extensions/nfsboot.configure
new file mode 100755
index 00000000..6a68dc48
--- /dev/null
+++ b/old/extensions/nfsboot.configure
@@ -0,0 +1,30 @@
+#!/bin/sh
+# Copyright (C) 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+# Remove all networking interfaces. On nfsboot systems, eth0 is set up
+# during kernel init, and the normal ifup@eth0.service systemd unit
+# would break the NFS connection and cause the system to hang.
+
+
+set -e
+if [ "$NFSBOOT_CONFIGURE" ]; then
+ # Remove all networking interfaces but loopback
+ cat > "$1/etc/network/interfaces" <<EOF
+auto lo
+iface lo inet loopback
+EOF
+
+fi
diff --git a/old/extensions/nfsboot.write b/old/extensions/nfsboot.write
new file mode 100755
index 00000000..1256b56f
--- /dev/null
+++ b/old/extensions/nfsboot.write
@@ -0,0 +1,206 @@
+#!/usr/bin/python2
+# Copyright (C) 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''A Morph deployment write extension for deploying to an nfsboot server
+
+*** DO NOT USE ***
+- This was written before 'proper' deployment mechanisms were in place.
+It is unlikely to work at all and will not work correctly.
+
+Use the pxeboot write extension instead
+
+***
+
+
+
+An nfsboot server is defined as a baserock system that has tftp and nfs
+servers running, where the tftp server exports the contents of
+/srv/nfsboot/tftp/ and the user has sufficient permissions to create nfs
+roots in /srv/nfsboot/nfs/.
+
+'''
+
+
+import glob
+import os
+import subprocess
+
+import writeexts
+
+
+class NFSBootWriteExtension(writeexts.WriteExtension):
+
+ '''Create an NFS root and kernel on TFTP during Morph's deployment.
+
+ The location command line argument is the hostname of the nfsboot server.
+ The user is expected to provide the location argument
+ using the following syntax:
+
+ HOST
+
+ where:
+
+ * HOST is the host of the nfsboot server
+
+ The extension will connect to root@HOST via ssh to copy the kernel and
+ rootfs, and configure the nfs server.
+
+ It requires root because it uses systemd, and reads/writes to /etc.
+
+ '''
+
+ _nfsboot_root = '/srv/nfsboot'
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise writeexts.ExtensionError(
+ 'Wrong number of command line args')
+
+ temp_root, location = args
+
+ version_label = os.getenv('VERSION_LABEL', 'factory')
+ hostname = os.environ['HOSTNAME']
+
+ versioned_root = os.path.join(self._nfsboot_root, hostname, 'systems',
+ version_label)
+
+ self.copy_rootfs(temp_root, location, versioned_root, hostname)
+ self.copy_kernel(temp_root, location, versioned_root, version_label,
+ hostname)
+ self.configure_nfs(location, hostname)
+
+ def create_local_state(self, location, hostname):
+ statedir = os.path.join(self._nfsboot_root, hostname, 'state')
+ subdirs = [os.path.join(statedir, 'home'),
+ os.path.join(statedir, 'opt'),
+ os.path.join(statedir, 'srv')]
+ writeexts.ssh_runcmd('root@%s' % location,
+ ['mkdir', '-p'] + subdirs)
+
+ def copy_kernel(self, temp_root, location, versioned_root, version,
+ hostname):
+ bootdir = os.path.join(temp_root, 'boot')
+ image_names = ['vmlinuz', 'zImage', 'uImage']
+ for name in image_names:
+ try_path = os.path.join(bootdir, name)
+ if os.path.exists(try_path):
+ kernel_src = try_path
+ break
+ else:
+ raise writeexts.ExtensionError(
+ 'Could not find a kernel in the system: none of '
+ '%s found' % ', '.join(image_names))
+
+ kernel_dest = os.path.join(versioned_root, 'orig', 'kernel')
+ rsync_dest = 'root@%s:%s' % (location, kernel_dest)
+ self.status(msg='Copying kernel')
+ subprocess.check_call(
+ ['rsync', '-s', kernel_src, rsync_dest])
+
+ # Link the kernel to the right place
+ self.status(msg='Creating links to kernel in tftp directory')
+        tftp_dir = os.path.join(self._nfsboot_root, 'tftp')
+ versioned_kernel_name = "%s-%s" % (hostname, version)
+ kernel_name = hostname
+ try:
+ writeexts.ssh_runcmd('root@%s' % location,
+ ['ln', '-f', kernel_dest,
+ os.path.join(tftp_dir, versioned_kernel_name)])
+
+ writeexts.ssh_runcmd('root@%s' % location,
+ ['ln', '-sf', versioned_kernel_name,
+ os.path.join(tftp_dir, kernel_name)])
+ except writeexts.ExtensionError:
+ raise writeexts.ExtensionError('Could not create symlinks to the '
+ 'kernel at %s in %s on %s' %
+ (kernel_dest, tftp_dir, location))
+
+ def copy_rootfs(self, temp_root, location, versioned_root, hostname):
+ rootfs_src = temp_root + '/'
+ orig_path = os.path.join(versioned_root, 'orig')
+ run_path = os.path.join(versioned_root, 'run')
+
+ self.status(msg='Creating destination directories')
+ try:
+ writeexts.ssh_runcmd('root@%s' % location,
+ ['mkdir', '-p', orig_path, run_path])
+ except writeexts.ExtensionError:
+            raise writeexts.ExtensionError(
+ 'Could not create dirs %s and %s on %s'
+ % (orig_path, run_path, location))
+
+ self.status(msg='Creating \'orig\' rootfs')
+ subprocess.check_call(
+ ['rsync', '-asXSPH', '--delete', rootfs_src,
+ 'root@%s:%s' % (location, orig_path)])
+
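+        # The 'run' tree is a hardlinked copy of 'orig' with an independent
+        # copy of /etc, so /etc can diverge without duplicating the rootfs.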
+ self.status(msg='Creating \'run\' rootfs')
+ try:
+ writeexts.ssh_runcmd('root@%s' % location,
+ ['rm', '-rf', run_path])
+ writeexts.ssh_runcmd('root@%s' % location,
+ ['cp', '-al', orig_path, run_path])
+ writeexts.ssh_runcmd('root@%s' % location,
+ ['rm', '-rf',
+ os.path.join(run_path, 'etc')])
+ writeexts.ssh_runcmd('root@%s' % location,
+ ['cp', '-a',
+ os.path.join(orig_path, 'etc'),
+ os.path.join(run_path, 'etc')])
+ except writeexts.ExtensionError:
+ raise writeexts.ExtensionError('Could not create \'run\' rootfs'
+ ' from \'orig\'')
+
+ self.status(msg='Linking \'default\' to latest system')
+ try:
+ writeexts.ssh_runcmd('root@%s' % location,
+ ['ln', '-sfn', versioned_root,
+ os.path.join(self._nfsboot_root, hostname, 'systems',
+ 'default')])
+ except writeexts.ExtensionError:
+ raise writeexts.ExtensionError("Could not link 'default' to %s"
+ % versioned_root)
+
+ def configure_nfs(self, location, hostname):
+ exported_path = os.path.join(self._nfsboot_root, hostname)
+ exports_path = '/etc/exports'
+ # If that path is not already exported:
+ try:
+ writeexts.ssh_runcmd(
+ 'root@%s' % location, ['grep', '-q', exported_path,
+ exports_path])
+ except writeexts.ExtensionError:
+ ip_mask = '*'
+ options = 'rw,no_subtree_check,no_root_squash,async'
+ exports_string = '%s %s(%s)\n' % (exported_path, ip_mask, options)
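+            # e.g. "/srv/nfsboot/myhost *(rw,no_subtree_check,no_root_squash,async)"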
+ exports_append_sh = '''\
+set -eu
+target="$1"
+temp=$(mktemp)
+cat "$target" > "$temp"
+cat >> "$temp"
+mv "$temp" "$target"
+'''
+ writeexts.ssh_runcmd(
+ 'root@%s' % location,
+ ['sh', '-c', exports_append_sh, '--', exports_path],
+ feed_stdin=exports_string)
+ writeexts.ssh_runcmd(
+ 'root@%s' % location, ['systemctl', 'restart',
+ 'nfs-server.service'])
+
+
+NFSBootWriteExtension().run()
diff --git a/old/extensions/nfsboot.write.help b/old/extensions/nfsboot.write.help
new file mode 100644
index 00000000..186c479a
--- /dev/null
+++ b/old/extensions/nfsboot.write.help
@@ -0,0 +1,33 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+ *** DO NOT USE ***
+ - This was written before 'proper' deployment mechanisms were in place.
+ It is unlikely to work at all, and will not work correctly.
+
+ Use the pxeboot write extension instead
+
+ ***
+ Deploy a system image and kernel to an nfsboot server.
+
+    An nfsboot server is defined as a baserock system that has
+    tftp and nfs servers running, where the tftp server exports
+    the contents of /srv/nfsboot/tftp/ and the user has sufficient
+    permissions to create nfs roots in /srv/nfsboot/nfs/.
+
+ The `location` argument is the hostname of the nfsboot server.
+
+ The extension will connect to root@HOST via ssh to copy the
+ kernel and rootfs, and configure the nfs server.
diff --git a/old/extensions/openstack-ceilometer.configure b/old/extensions/openstack-ceilometer.configure
new file mode 100644
index 00000000..a98c4d73
--- /dev/null
+++ b/old/extensions/openstack-ceilometer.configure
@@ -0,0 +1,122 @@
+#!/bin/sh
+
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+unacceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+        unacceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool CEILOMETER_ENABLE_CONTROLLER
+check_bool CEILOMETER_ENABLE_COMPUTE
+
+if ! "$CEILOMETER_ENABLE_CONTROLLER" && \
+ ! "$CEILOMETER_ENABLE_COMPUTE"; then
+ exit 0
+fi
+
+if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \
+ -z "$CEILOMETER_SERVICE_USER" -o \
+ -z "$CEILOMETER_SERVICE_PASSWORD" -o \
+ -z "$CEILOMETER_DB_USER" -o \
+ -z "$CEILOMETER_DB_PASSWORD" -o \
+ -z "$METERING_SECRET" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$NOVA_VIRT_TYPE" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" ]; then
+ echo Some options required for Ceilometer were defined, but not all.
+ exit 1
+fi
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+if "$CEILOMETER_ENABLE_COMPUTE" || "$CEILOMETER_ENABLE_CONTROLLER"; then
+ enable openstack-ceilometer-config-setup
+fi
+if "$CEILOMETER_ENABLE_COMPUTE"; then
+ enable openstack-ceilometer-compute
+fi
+if "$CEILOMETER_ENABLE_CONTROLLER"; then
+ enable openstack-ceilometer-db-setup
+ enable openstack-ceilometer-api
+ enable openstack-ceilometer-collector
+ enable openstack-ceilometer-notification
+ enable openstack-ceilometer-central
+ enable openstack-ceilometer-alarm-evaluator
+ enable openstack-ceilometer-alarm-notifier
+fi
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
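+# Serialise the settings as YAML under /etc/openstack, presumably for the
+# openstack-ceilometer-* setup units enabled above to read at first boot.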
+python <<'EOF' >"$OPENSTACK_DATA/ceilometer.conf"
+import os, sys, yaml
+
+ceilometer_configuration={
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+ 'CEILOMETER_SERVICE_PASSWORD': os.environ['CEILOMETER_SERVICE_PASSWORD'],
+ 'CEILOMETER_SERVICE_USER': os.environ['CEILOMETER_SERVICE_USER'],
+ 'CEILOMETER_DB_USER': os.environ['CEILOMETER_DB_USER'],
+ 'CEILOMETER_DB_PASSWORD': os.environ['CEILOMETER_DB_PASSWORD'],
+ 'METERING_SECRET': os.environ['METERING_SECRET'],
+ 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'],
+ 'RABBITMQ_USER': os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+ 'NOVA_VIRT_TYPE': os.environ['NOVA_VIRT_TYPE'],
+}
+
+yaml.dump(ceilometer_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/old/extensions/openstack-cinder.configure b/old/extensions/openstack-cinder.configure
new file mode 100644
index 00000000..4c32e11a
--- /dev/null
+++ b/old/extensions/openstack-cinder.configure
@@ -0,0 +1,125 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+unacceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+        unacceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool CINDER_ENABLE_CONTROLLER
+check_bool CINDER_ENABLE_COMPUTE
+check_bool CINDER_ENABLE_STORAGE
+
+if ! "$CINDER_ENABLE_CONTROLLER" && \
+ ! "$CINDER_ENABLE_COMPUTE" && \
+ ! "$CINDER_ENABLE_STORAGE"; then
+ exit 0
+fi
+
+if [ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \
+ -z "$CINDER_DB_USER" -o \
+ -z "$CINDER_DB_PASSWORD" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" -o \
+ -z "$CINDER_SERVICE_USER" -o \
+ -z "$CINDER_SERVICE_PASSWORD" -o \
+ -z "$CINDER_DEVICE" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" ]; then
+ echo Some options required for Cinder were defined, but not all.
+ exit 1
+fi
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+if "$CINDER_ENABLE_COMPUTE" || "$CINDER_ENABLE_STORAGE"; then
+ enable iscsi-setup
+ enable target #target.service!
+ enable iscsid
+fi
+if "$CINDER_ENABLE_COMPUTE" || "$CINDER_ENABLE_CONTROLLER" || "$CINDER_ENABLE_STORAGE"; then
+ enable openstack-cinder-config-setup
+fi
+if "$CINDER_ENABLE_STORAGE"; then
+ enable openstack-cinder-lv-setup
+ enable lvm2-lvmetad
+ enable openstack-cinder-volume
+ enable openstack-cinder-backup
+ enable openstack-cinder-scheduler
+fi
+if "$CINDER_ENABLE_CONTROLLER"; then
+ enable openstack-cinder-db-setup
+ enable openstack-cinder-api
+fi
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/cinder.conf"
+import os, sys, yaml
+
+cinder_configuration={
+ 'RABBITMQ_HOST':os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_PORT':os.environ['RABBITMQ_PORT'],
+ 'RABBITMQ_USER':os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD':os.environ['RABBITMQ_PASSWORD'],
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN':os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+ 'CINDER_DB_USER':os.environ['CINDER_DB_USER'],
+ 'CINDER_DB_PASSWORD':os.environ['CINDER_DB_PASSWORD'],
+ 'CONTROLLER_HOST_ADDRESS':os.environ['CONTROLLER_HOST_ADDRESS'],
+ 'CINDER_SERVICE_USER':os.environ['CINDER_SERVICE_USER'],
+ 'CINDER_SERVICE_PASSWORD':os.environ['CINDER_SERVICE_PASSWORD'],
+ 'CINDER_DEVICE':os.environ['CINDER_DEVICE'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS':os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+}
+
+yaml.dump(cinder_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/old/extensions/openstack-glance.configure b/old/extensions/openstack-glance.configure
new file mode 100644
index 00000000..5da08895
--- /dev/null
+++ b/old/extensions/openstack-glance.configure
@@ -0,0 +1,101 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+unacceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+        unacceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool GLANCE_ENABLE_SERVICE
+
+if ! "$GLANCE_ENABLE_SERVICE"; then
+ exit 0
+fi
+
+if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \
+ -z "$GLANCE_SERVICE_USER" -o \
+ -z "$GLANCE_SERVICE_PASSWORD" -o \
+ -z "$GLANCE_DB_USER" -o \
+ -z "$GLANCE_DB_PASSWORD" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" ]; then
+ echo Some options required for Glance were defined, but not all.
+ exit 1
+fi
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+enable openstack-glance-setup
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/glance.conf"
+import os, sys, yaml
+
+glance_configuration={
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+ 'GLANCE_SERVICE_PASSWORD': os.environ['GLANCE_SERVICE_PASSWORD'],
+ 'GLANCE_SERVICE_USER': os.environ['GLANCE_SERVICE_USER'],
+ 'GLANCE_DB_USER': os.environ['GLANCE_DB_USER'],
+ 'GLANCE_DB_PASSWORD': os.environ['GLANCE_DB_PASSWORD'],
+ 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'],
+ 'RABBITMQ_USER': os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+}
+
+yaml.dump(glance_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/old/extensions/openstack-ironic.configure b/old/extensions/openstack-ironic.configure
new file mode 100644
index 00000000..c77b1288
--- /dev/null
+++ b/old/extensions/openstack-ironic.configure
@@ -0,0 +1,157 @@
+#!/bin/sh
+
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+unacceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+        unacceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool IRONIC_ENABLE_SERVICE
+
+if ! "$IRONIC_ENABLE_SERVICE"; then
+ exit 0
+fi
+
+if [ -z "$IRONIC_SERVICE_USER" -o \
+ -z "$IRONIC_SERVICE_PASSWORD" -o \
+ -z "$IRONIC_DB_USER" -o \
+ -z "$IRONIC_DB_PASSWORD" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then
+ echo Some options required for Ironic were defined, but not all.
+ exit 1
+fi
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+enable openstack-ironic-setup
+enable iscsi-setup
+enable target #target.service!
+enable iscsid
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/ironic.conf"
+import os, sys, yaml
+
+ironic_configuration={
+ 'IRONIC_SERVICE_USER': os.environ['IRONIC_SERVICE_USER'],
+ 'IRONIC_SERVICE_PASSWORD': os.environ['IRONIC_SERVICE_PASSWORD'],
+ 'IRONIC_DB_USER': os.environ['IRONIC_DB_USER'],
+ 'IRONIC_DB_PASSWORD': os.environ['IRONIC_DB_PASSWORD'],
+ 'RABBITMQ_HOST':os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_PORT':os.environ['RABBITMQ_PORT'],
+ 'RABBITMQ_USER':os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD':os.environ['RABBITMQ_PASSWORD'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+
+}
+
+yaml.dump(ironic_configuration, sys.stdout, default_flow_style=False)
+EOF
+
+##########################################################################
+# Configure the TFTP service #
+##########################################################################
+
+tftp_root="/srv/tftp_root/" # trailing slash is essential
+mkdir -p "$ROOT/$tftp_root"
+
+install -D /dev/stdin -m 644 "$ROOT/usr/lib/systemd/system/tftp-hpa.service" << 'EOF'
+[Unit]
+Description=tftp service for booting kernels
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=simple
+EnvironmentFile=/etc/tftp-hpa.conf
+ExecStart=/usr/sbin/in.tftpd $TFTP_OPTIONS ${TFTP_ROOT}
+StandardInput=socket
+StandardOutput=inherit
+StandardError=journal
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+install -D /dev/stdin -m 644 "$ROOT/usr/lib/systemd/system/tftp-hpa.socket" << EOF
+[Unit]
+Description=Tftp server activation socket
+
+[Socket]
+ListenDatagram=$MANAGEMENT_INTERFACE_IP_ADDRESS:69
+FreeBind=yes
+
+[Install]
+WantedBy=sockets.target
+EOF
+
+install -D -m 644 /dev/stdin "$ROOT"/etc/tftp-hpa.conf << EOF
+TFTP_ROOT=$tftp_root
+TFTP_OPTIONS="-v -v -v -v -v --map-file $tftp_root/map-file"
+EOF
+
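+# Remap tftp requests onto $tftp_root: both bare relative paths and legacy
+# /tftpboot/ paths are served from there.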
+install -D /dev/stdin -m 644 "$ROOT/$tftp_root"/map-file << EOF
+r ^([^/]) $tftp_root\1
+r ^/tftpboot/ $tftp_root\2
+EOF
+
+cp "$ROOT"/usr/share/syslinux/pxelinux.0 "$ROOT/$tftp_root"
+cp "$ROOT"/usr/share/syslinux/chain.c32 "$ROOT/$tftp_root"
+
diff --git a/old/extensions/openstack-keystone.configure b/old/extensions/openstack-keystone.configure
new file mode 100644
index 00000000..6b011b14
--- /dev/null
+++ b/old/extensions/openstack-keystone.configure
@@ -0,0 +1,123 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+unacceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+        unacceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool KEYSTONE_ENABLE_SERVICE
+
+if ! "$KEYSTONE_ENABLE_SERVICE"; then
+ exit 0
+fi
+
+if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \
+ -z "$KEYSTONE_ADMIN_PASSWORD" -o \
+ -z "$KEYSTONE_DB_USER" -o \
+ -z "$KEYSTONE_DB_PASSWORD" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" ]; then
+ echo Some options required for Keystone were defined, but not all.
+ exit 1
+fi
+
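+# Fail early if MANAGEMENT_INTERFACE_IP_ADDRESS is not a valid IPv4 address.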
+python <<'EOF'
+import socket
+import sys
+import os
+
+try:
+ socket.inet_pton(socket.AF_INET, os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'])
+except:
+ print "Error: MANAGEMENT_INTERFACE_IP_ADDRESS is not a valid IP"
+ sys.exit(1)
+EOF
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+enable openstack-keystone-setup
+enable openstack-horizon-setup
+enable postgres-server-setup
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/keystone.conf"
+import os, sys, yaml
+
+keystone_configuration={
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+ 'KEYSTONE_ADMIN_PASSWORD': os.environ['KEYSTONE_ADMIN_PASSWORD'],
+ 'KEYSTONE_DB_USER': os.environ['KEYSTONE_DB_USER'],
+ 'KEYSTONE_DB_PASSWORD': os.environ['KEYSTONE_DB_PASSWORD'],
+ 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'],
+ 'RABBITMQ_USER': os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+}
+
+yaml.dump(keystone_configuration, sys.stdout, default_flow_style=False)
+EOF
+
+python << 'EOF' > "$OPENSTACK_DATA/postgres.conf"
+import os, sys, yaml
+
+postgres_configuration={
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+}
+
+yaml.dump(postgres_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/old/extensions/openstack-network.configure b/old/extensions/openstack-network.configure
new file mode 100644
index 00000000..9128f845
--- /dev/null
+++ b/old/extensions/openstack-network.configure
@@ -0,0 +1,80 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service"
+}
+
+unacceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+        unacceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool NEUTRON_ENABLE_MANAGER
+check_bool NEUTRON_ENABLE_AGENT
+
+if ! "$NEUTRON_ENABLE_MANAGER" && ! "$NEUTRON_ENABLE_AGENT"; then
+ exit 0
+fi
+
+###################
+# Enable services #
+###################
+
+enable openvswitch-setup
+enable openstack-network-setup
+
+##########################################################################
+# Generate config variable shell snippet
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/network.conf"
+import os, sys, yaml
+
+network_configuration = {}
+
+optional_keys = ('EXTERNAL_INTERFACE',)
+
+network_configuration.update((k, os.environ[k]) for k in optional_keys if k in os.environ)
+
+yaml.dump(network_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/old/extensions/openstack-neutron.configure b/old/extensions/openstack-neutron.configure
new file mode 100644
index 00000000..210222db
--- /dev/null
+++ b/old/extensions/openstack-neutron.configure
@@ -0,0 +1,138 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/openstack-neutron-$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-neutron-$1.service"
+}
+
+unacceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+        unacceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool NEUTRON_ENABLE_CONTROLLER
+check_bool NEUTRON_ENABLE_MANAGER
+check_bool NEUTRON_ENABLE_AGENT
+
+if ! "$NEUTRON_ENABLE_CONTROLLER" && \
+ ! "$NEUTRON_ENABLE_MANAGER" && \
+ ! "$NEUTRON_ENABLE_AGENT"; then
+ exit 0
+fi
+
+if [ -z "$NEUTRON_SERVICE_USER" -o \
+ -z "$NEUTRON_SERVICE_PASSWORD" -o \
+ -z "$NEUTRON_DB_USER" -o \
+ -z "$NEUTRON_DB_PASSWORD" -o \
+ -z "$METADATA_PROXY_SHARED_SECRET" -o \
+ -z "$NOVA_SERVICE_USER" -o \
+ -z "$NOVA_SERVICE_PASSWORD" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then
+ echo Some options required for Neutron were defined, but not all.
+ exit 1
+fi
+
+#############################################
+# Ensure /var/run is an appropriate symlink #
+#############################################
+
+if ! link="$(readlink "$ROOT/var/run")" || [ "$link" != ../run ]; then
+ rm -rf "$ROOT/var/run"
+ ln -s ../run "$ROOT/var/run"
+fi
+
+###################
+# Enable services #
+###################
+
+if "$NEUTRON_ENABLE_CONTROLLER"; then
+ enable config-setup
+ enable db-setup
+ enable server
+fi
+
+if "$NEUTRON_ENABLE_MANAGER"; then
+ enable config-setup
+ enable ovs-cleanup
+ enable dhcp-agent
+ enable l3-agent
+ enable plugin-openvswitch-agent
+ enable metadata-agent
+fi
+
+if "$NEUTRON_ENABLE_AGENT"; then
+ enable config-setup
+ enable plugin-openvswitch-agent
+fi
+
+##########################################################################
+# Generate config variable shell snippet
+##########################################################################
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/neutron.conf"
+import os, sys, yaml
+
+neutron_configuration={
+ 'NEUTRON_SERVICE_USER': os.environ['NEUTRON_SERVICE_USER'],
+ 'NEUTRON_SERVICE_PASSWORD': os.environ['NEUTRON_SERVICE_PASSWORD'],
+ 'NEUTRON_DB_USER': os.environ['NEUTRON_DB_USER'],
+ 'NEUTRON_DB_PASSWORD': os.environ['NEUTRON_DB_PASSWORD'],
+ 'METADATA_PROXY_SHARED_SECRET': os.environ['METADATA_PROXY_SHARED_SECRET'],
+ 'NOVA_SERVICE_USER': os.environ['NOVA_SERVICE_USER'],
+ 'NOVA_SERVICE_PASSWORD': os.environ['NOVA_SERVICE_PASSWORD'],
+ 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_USER': os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'],
+ 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+}
+
+yaml.dump(neutron_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/old/extensions/openstack-nova.configure b/old/extensions/openstack-nova.configure
new file mode 100644
index 00000000..241d94c2
--- /dev/null
+++ b/old/extensions/openstack-nova.configure
@@ -0,0 +1,163 @@
+#!/bin/sh
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+ROOT="$1"
+
+enable(){
+ ln -sf "/usr/lib/systemd/system/openstack-nova-$1.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-nova-$1.service"
+}
+
+unacceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True|'')
+ eval "$1=true"
+ ;;
+ False)
+ eval "$1=false"
+ ;;
+ *)
+        unacceptable "$1"
+ ;;
+ esac
+}
+
+##########################################################################
+# Check variables
+##########################################################################
+
+check_bool NOVA_ENABLE_CONTROLLER
+check_bool NOVA_ENABLE_COMPUTE
+
+if ! "$NOVA_ENABLE_CONTROLLER" && \
+ ! "$NOVA_ENABLE_COMPUTE"; then
+ exit 0
+fi
+
+if [ -z "$NOVA_SERVICE_USER" -o \
+ -z "$NOVA_SERVICE_PASSWORD" -o \
+ -z "$NOVA_DB_USER" -o \
+ -z "$NOVA_DB_PASSWORD" -o \
+ -z "$NOVA_VIRT_TYPE" -o \
+ -z "$NEUTRON_SERVICE_USER" -o \
+ -z "$NEUTRON_SERVICE_PASSWORD" -o \
+ -z "$IRONIC_SERVICE_USER" -a \
+ -z "$IRONIC_SERVICE_PASSWORD" -a \
+ -z "$METADATA_PROXY_SHARED_SECRET" -o \
+ -z "$RABBITMQ_HOST" -o \
+ -z "$RABBITMQ_USER" -o \
+ -z "$RABBITMQ_PASSWORD" -o \
+ -z "$RABBITMQ_PORT" -o \
+ -z "$CONTROLLER_HOST_ADDRESS" -o \
+ -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \
+ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then
+ echo Some options required for Nova were defined, but not all.
+ exit 1
+fi
+
+###############################################
+# Enable libvirtd and libvirt-guests services #
+###############################################
+
+wants_dir="$ROOT"/usr/lib/systemd/system/multi-user.target.wants
+mkdir -p "$wants_dir"
+mkdir -p "$ROOT"/var/lock/subsys
+ln -sf ../libvirtd.service "$wants_dir/libvirtd.service"
+
+######################################
+# Enable relevant openstack services #
+######################################
+
+if "$NOVA_ENABLE_CONTROLLER" || "$NOVA_ENABLE_COMPUTE"; then
+ enable config-setup
+fi
+if "$NOVA_ENABLE_CONTROLLER" && ! "$NOVA_ENABLE_COMPUTE"; then
+ enable conductor
+fi
+if "$NOVA_ENABLE_COMPUTE"; then
+ enable compute
+fi
+if "$NOVA_ENABLE_CONTROLLER"; then
+ for service in db-setup api cert consoleauth novncproxy scheduler serialproxy; do
+ enable "$service"
+ done
+fi
+
+
+##########################################################################
+# Generate configuration file
+##########################################################################
+
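+# Select scheduler and compute-driver settings: Ironic baremetal scheduling
+# when NOVA_BAREMETAL_SCHEDULING is true, libvirt defaults otherwise.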
+case "$NOVA_BAREMETAL_SCHEDULING" in
+ True|true|yes)
+ export COMPUTE_MANAGER=ironic.nova.compute.manager.ClusteredComputeManager
+ export RESERVED_HOST_MEMORY_MB=0
+ export SCHEDULER_HOST_MANAGER=nova.scheduler.ironic_host_manager.IronicHostManager
+ export RAM_ALLOCATION_RATIO=1.0
+ export COMPUTE_DRIVER=nova.virt.ironic.IronicDriver
+ export SCHEDULER_USE_BAREMETAL_FILTERS=true
+ ;;
+ *)
+ export COMPUTE_MANAGER=nova.compute.manager.ComputeManager
+ export RESERVED_HOST_MEMORY_MB=512
+ export SCHEDULER_HOST_MANAGER=nova.scheduler.host_manager.HostManager
+ export RAM_ALLOCATION_RATIO=1.5
+ export COMPUTE_DRIVER=libvirt.LibvirtDriver
+ export SCHEDULER_USE_BAREMETAL_FILTERS=false
+ ;;
+esac
+
+OPENSTACK_DATA="$ROOT/etc/openstack"
+mkdir -p "$OPENSTACK_DATA"
+
+python <<'EOF' >"$OPENSTACK_DATA/nova.conf"
+import os, sys, yaml
+
+nova_configuration={
+ 'NOVA_SERVICE_USER': os.environ['NOVA_SERVICE_USER'],
+ 'NOVA_SERVICE_PASSWORD': os.environ['NOVA_SERVICE_PASSWORD'],
+ 'NOVA_DB_USER': os.environ['NOVA_DB_USER'],
+ 'NOVA_DB_PASSWORD': os.environ['NOVA_DB_PASSWORD'],
+ 'NOVA_VIRT_TYPE': os.environ['NOVA_VIRT_TYPE'],
+ 'COMPUTE_MANAGER': os.environ['COMPUTE_MANAGER'],
+ 'RESERVED_HOST_MEMORY_MB': os.environ['RESERVED_HOST_MEMORY_MB'],
+ 'SCHEDULER_HOST_MANAGER': os.environ['SCHEDULER_HOST_MANAGER'],
+ 'RAM_ALLOCATION_RATIO': os.environ['RAM_ALLOCATION_RATIO'],
+ 'SCHEDULER_USE_BAREMETAL_FILTERS': os.environ['SCHEDULER_USE_BAREMETAL_FILTERS'],
+ 'COMPUTE_DRIVER': os.environ['COMPUTE_DRIVER'],
+ 'NEUTRON_SERVICE_USER': os.environ['NEUTRON_SERVICE_USER'],
+ 'NEUTRON_SERVICE_PASSWORD': os.environ['NEUTRON_SERVICE_PASSWORD'],
+ 'IRONIC_SERVICE_USER': os.environ['IRONIC_SERVICE_USER'],
+ 'IRONIC_SERVICE_PASSWORD': os.environ['IRONIC_SERVICE_PASSWORD'],
+ 'METADATA_PROXY_SHARED_SECRET': os.environ['METADATA_PROXY_SHARED_SECRET'],
+ 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'],
+ 'RABBITMQ_USER': os.environ['RABBITMQ_USER'],
+ 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'],
+ 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'],
+ 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'],
+ 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'],
+ 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'],
+}
+
+yaml.dump(nova_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/old/extensions/openstack-swift-controller.configure b/old/extensions/openstack-swift-controller.configure
new file mode 100644
index 00000000..424ab57b
--- /dev/null
+++ b/old/extensions/openstack-swift-controller.configure
@@ -0,0 +1,49 @@
+#!/bin/bash
+#
+# Copyright © 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+set -e
+
+export ROOT="$1"
+
+MANDATORY_OPTIONS="SWIFT_ADMIN_PASSWORD KEYSTONE_TEMPORARY_ADMIN_TOKEN"
+
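+# [[ -v NAME ]] tests whether the variable named NAME is set (bash 4.2+);
+# here the name to test comes from $option.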
+for option in $MANDATORY_OPTIONS
+do
+ if ! [[ -v $option ]]
+ then
+ missing_option=True
+ echo "Required option $option isn't set!" >&2
+ fi
+done
+
+if [[ $missing_option = True ]]; then exit 1; fi
+
+mkdir -p "$ROOT/usr/lib/systemd/system/multi-user.target.wants" # ensure this exists before we make symlinks
+
+ln -s "/usr/lib/systemd/system/swift-controller-setup.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-controller-setup.service"
+ln -s "/usr/lib/systemd/system/memcached.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/memcached.service"
+ln -s "/usr/lib/systemd/system/openstack-swift-proxy.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-proxy.service"
+
+cat << EOF > "$ROOT"/usr/share/openstack/swift-controller-vars.yml
+---
+SWIFT_ADMIN_PASSWORD: $SWIFT_ADMIN_PASSWORD
+MANAGEMENT_INTERFACE_IP_ADDRESS: $MANAGEMENT_INTERFACE_IP_ADDRESS
+KEYSTONE_TEMPORARY_ADMIN_TOKEN: $KEYSTONE_TEMPORARY_ADMIN_TOKEN
+EOF
diff --git a/old/extensions/openstack-time.configure b/old/extensions/openstack-time.configure
new file mode 100644
index 00000000..4f5c8fbd
--- /dev/null
+++ b/old/extensions/openstack-time.configure
@@ -0,0 +1,61 @@
+#!/bin/sh
+
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+unacceptable(){
+ eval echo Unexpected value \$$1 for $1 >&2
+ exit 1
+}
+
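+# Normalise a "True"/"False"/empty variable to the shell commands
+# true/false, so it can be used directly as an `if` condition.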
+check_bool(){
+ case "$(eval echo \"\$$1\")" in
+ True)
+ eval "$1=true"
+ ;;
+ False|'')
+ eval "$1=false"
+ ;;
+ *)
+ unacceptable "$1"
+ ;;
+ esac
+}
+check_bool SYNC_TIME_WITH_CONTROLLER
+
+if "$SYNC_TIME_WITH_CONTROLLER"; then
+
+ cat << EOF > "$ROOT"/etc/ntpd.conf
+# We use iburst here to reduce the potential initial delay to set the clock
+server $CONTROLLER_HOST_ADDRESS iburst
+
+# kod - notify client when packets are denied service,
+# rather than just dropping the packets
+#
+# nomodify - deny queries which attempt to modify the state of the server
+#
+# notrap - decline to provide mode 6 control message trap service to
+# matching hosts
+#
+# see ntp.conf(5) for more details
+restrict -4 default limited kod notrap nomodify
+restrict -6 default limited kod notrap nomodify
+EOF
+
+fi
diff --git a/old/extensions/openstack.check b/old/extensions/openstack.check
new file mode 100755
index 00000000..131ea8e8
--- /dev/null
+++ b/old/extensions/openstack.check
@@ -0,0 +1,92 @@
+#!/usr/bin/python2
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''Preparatory checks for Morph 'openstack' write extension'''
+
+import os
+import urlparse
+
+import keystoneclient
+
+import writeexts
+
+
+class OpenStackCheckExtension(writeexts.WriteExtension):
+
+ def process_args(self, args):
+ if len(args) != 1:
+ raise writeexts.ExtensionError(
+ 'Wrong number of command line args')
+
+ self.require_btrfs_in_deployment_host_kernel()
+
+ upgrade = self.get_environment_boolean('UPGRADE')
+ if upgrade:
+ raise writeexts.ExtensionError(
+ 'Use the `ssh-rsync` write extension to deploy upgrades to an '
+ 'existing remote system.')
+
+ location = args[0]
+ self.check_location(location)
+
+ self.check_imagename()
+ self.check_openstack_parameters(self._get_auth_parameters(location))
+
+ def _get_auth_parameters(self, location):
+ '''Check that the needed environment variables are set, and return them.
+
+ The environment variables are described in the class documentation.
+ '''
+
+ auth_keys = {'OPENSTACK_USER': 'username',
+ 'OPENSTACK_TENANT': 'tenant_name',
+ 'OPENSTACK_PASSWORD': 'password'}
+
+ for key in auth_keys:
+ if os.environ.get(key, '') == '':
+ raise writeexts.ExtensionError(key + ' was not given')
+
+ auth_params = {auth_keys[key]: os.environ[key] for key in auth_keys}
+ auth_params['auth_url'] = location
+ return auth_params
+
+ def check_imagename(self):
+ if os.environ.get('OPENSTACK_IMAGENAME', '') == '':
+ raise writeexts.ExtensionError(
+ 'OPENSTACK_IMAGENAME was not given')
+
+ def check_location(self, location):
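+ # e.g. location = 'http://keystone.example.com:5000/v2.0'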
+ x = urlparse.urlparse(location)
+ if x.scheme not in ['http', 'https']:
+ raise writeexts.ExtensionError(
+ 'URL scheme must be http or https in %s' % location)
+ if (x.path != '/v2.0' and x.path != '/v2.0/'):
+ raise writeexts.ExtensionError(
+ 'API version must be v2.0 in %s' % location)
+
+ def check_openstack_parameters(self, auth_params):
+ ''' Check that we can connect to and authenticate with openstack '''
+
+ self.status(msg='Checking OpenStack credentials...')
+
+ try:
+ keystoneclient.v2_0.Client(**auth_params)
+ except keystoneclient.exceptions.Unauthorized:
+ errmsg = ('Failed to authenticate with OpenStack '
+ '(are your credentials correct?)')
+ raise writeexts.ExtensionError(errmsg)
+
+
+OpenStackCheckExtension().run()
diff --git a/old/extensions/openstack.write b/old/extensions/openstack.write
new file mode 100755
index 00000000..1fc3ba90
--- /dev/null
+++ b/old/extensions/openstack.write
@@ -0,0 +1,94 @@
+#!/usr/bin/python2
+# Copyright (C) 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''A Morph deployment write extension for deploying to OpenStack.'''
+
+
+import os
+import subprocess
+import tempfile
+import urlparse
+
+import writeexts
+
+
+class OpenStackWriteExtension(writeexts.WriteExtension):
+
+ '''See openstack.write.help for documentation'''
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise writeexts.ExtensionError(
+ 'Wrong number of command line args')
+
+ temp_root, location = args
+
+ os_params = self.get_openstack_parameters()
+
+ fd, raw_disk = tempfile.mkstemp()
+ os.close(fd)
+ self.create_local_system(temp_root, raw_disk)
+ self.status(msg='Temporary disk image has been created at %s'
+ % raw_disk)
+
+ self.set_extlinux_root_to_virtio(raw_disk)
+
+ self.configure_openstack_image(raw_disk, location, os_params)
+
+ def set_extlinux_root_to_virtio(self, raw_disk):
+ '''Re-configures extlinux to use virtio disks'''
+ self.status(msg='Updating extlinux.conf')
+ with self.find_and_mount_rootfs(raw_disk) as mp:
+ path = os.path.join(mp, 'extlinux.conf')
+
+ with open(path) as f:
+ extlinux_conf = f.read()
+
+ extlinux_conf = extlinux_conf.replace('root=/dev/sda',
+ 'root=/dev/vda')
+ with open(path, "w") as f:
+ f.write(extlinux_conf)
+
+ def get_openstack_parameters(self):
+ '''Get the environment variables needed.
+
+ The environment variables are described in the class documentation.
+ '''
+
+ keys = ('OPENSTACK_USER', 'OPENSTACK_TENANT',
+ 'OPENSTACK_IMAGENAME', 'OPENSTACK_PASSWORD')
+ return (os.environ[key] for key in keys)
+
+ def configure_openstack_image(self, raw_disk, auth_url, os_params):
+ '''Configure the image in OpenStack using glance-client'''
+ self.status(msg='Configuring OpenStack image...')
+
+ username, tenant_name, image_name, password = os_params
+ cmdline = ['glance',
+ '--os-username', username,
+ '--os-tenant-name', tenant_name,
+ '--os-password', password,
+ '--os-auth-url', auth_url,
+ 'image-create',
+ '--name=%s' % image_name,
+ '--disk-format=raw',
+ '--container-format', 'bare',
+ '--file', raw_disk]
+ subprocess.check_call(cmdline)
+
+ self.status(msg='Image configured.')
+
+OpenStackWriteExtension().run()
diff --git a/old/extensions/openstack.write.help b/old/extensions/openstack.write.help
new file mode 100644
index 00000000..26983060
--- /dev/null
+++ b/old/extensions/openstack.write.help
@@ -0,0 +1,51 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+ Deploy a Baserock system as a *new* OpenStack virtual machine.
+ (Use the `ssh-rsync` write extension to deploy upgrades to an *existing*
+ VM)
+
+ Deploys the system to the OpenStack host using python-glanceclient.
+
+ Parameters:
+
+ * location: the authentication url of the OpenStack server using the
+ following syntax:
+
+ http://HOST:PORT/VERSION
+
+ where
+
+ * HOST is the host running OpenStack
+ * PORT is the port on which OpenStack listens for authentication.
+ * VERSION is the authentication version of OpenStack (Only v2.0
+ supported)
+
+ * OPENSTACK_USER=username: the username to use in the `--os-username`
+ argument to `glance`.
+
+ * OPENSTACK_TENANT=tenant: the project name to use in the
+ `--os-tenant-name` argument to `glance`.
+
+ * OPENSTACK_IMAGENAME=imagename: the name of the image to use in the
+ `--name` argument to `glance`.
+
+ * OPENSTACK_PASSWORD=password: the password of the OpenStack user. (We
+ recommend passing this on the command-line, rather than setting an
+ environment variable or storing it in a cluster definition file.)
+
+ (See `morph help deploy` for details of how to pass parameters to write
+ extensions)
diff --git a/old/extensions/partitioning.py b/old/extensions/partitioning.py
new file mode 100644
index 00000000..2a8de058
--- /dev/null
+++ b/old/extensions/partitioning.py
@@ -0,0 +1,163 @@
+#!/usr/bin/python2
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+"""A module providing Baserock-specific partitioning functions"""
+
+import os
+import pyfdisk
+import re
+import subprocess
+import writeexts
+
+def do_partitioning(location, disk_size, temp_root, part_spec):
+ '''Perform partitioning
+
+ Perform partitioning using the pyfdisk.py module. Documentation
+ for this, and guidance on how to create a partition specification can
+ be found in extensions/pyfdisk.README
+
+ This function also validates essential parts of the partition layout
+
+ Args:
+ location: Path to the target device or image
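+ disk_size: Size of the target device or image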
+ temp_root: Location of the unpacked Baserock rootfs
+ part_spec: Path to a YAML formatted partition specification
+ Returns:
+ A pyfdisk.py Device object
+ Raises:
+ writeexts.ExtensionError
+ '''
+ # Create partition table and filesystems
+ try:
+ dev = pyfdisk.load_yaml(location, disk_size, part_spec)
+ writeexts.Extension.status(msg='Loaded partition specification: %s' %
+ part_spec)
+
+ # FIXME: GPT currently not fully supported due to missing tools
+ if dev.partition_table_format.lower() == 'gpt':
+ writeexts.Extension.status(msg='WARNING: GPT partition tables '
+ 'are not currently supported, '
+ 'when using the extlinux '
+ 'bootloader')
+
+ writeexts.Extension.status(msg='Summary:\n' + str(dev.partitionlist))
+ writeexts.Extension.status(msg='Writing partition table')
+ dev.commit()
+ dev.create_filesystems(skip=('/',))
+ except (pyfdisk.PartitioningError, pyfdisk.FdiskError) as e:
+ raise writeexts.ExtensionError(e.msg)
+
+ mountpoints = set(part.mountpoint for part in dev.partitionlist
+ if hasattr(part, 'mountpoint'))
+ if '/' not in mountpoints:
+ raise writeexts.ExtensionError('No partition with root '
+ 'mountpoint, please specify a '
+ 'partition with \'mountpoint: /\' '
+ 'in the partition specification')
+
+ mounted_partitions = set(part for part in dev.partitionlist
+ if hasattr(part, 'mountpoint'))
+
+ for part in mounted_partitions:
+ if not hasattr(part, 'filesystem'):
+ raise writeexts.ExtensionError('Cannot mount a partition '
+ 'without filesystem, please specify one '
+ 'for \'%s\' partition in the partition '
+ 'specification' % part.mountpoint)
+ if part.mountpoint == '/':
+ # Check that bootable flag is set for MBR devices
+ if (hasattr(part, 'boot')
+ and str(part.boot).lower() not in ('yes', 'true')
+ and dev.partition_table_format.lower() == 'mbr'):
+ writeexts.Extension.status(msg='WARNING: Boot partition '
+ 'needs bootable flag set to '
+ 'boot with extlinux/syslinux')
+
+ return dev
+
+def process_raw_files(dev, temp_root):
+ if hasattr(dev, 'raw_files'):
+ write_raw_files(dev.location, temp_root, dev)
+ for part in dev.partitionlist:
+ if hasattr(part, 'raw_files'):
+ # dd seek=n is used, which skips n blocks before writing,
+ # so we must skip n-1 sectors before writing in order to
+ # start writing files to the first block of the partition
+ write_raw_files(dev.location, temp_root, part,
+ (part.extent.start - 1) * dev.sector_size)
+
+def write_raw_files(location, temp_root, dev_or_part, start_offset=0):
+ '''Write files with `dd`'''
+ offset = 0
+ for raw_args in dev_or_part.raw_files:
+ r = RawFile(temp_root, start_offset, offset, **raw_args)
+ offset = r.next_offset
+ r.dd(location)
+
+
+class RawFile(object):
+ '''A class to hold information about a raw file to write to a device'''
+
+ def __init__(self, source_root,
+ start_offset=0, wr_offset=0,
+ sector_size=512, **kwargs):
+ '''Initialisation function
+
+ Args:
+ source_root: Base path for filenames
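+ start_offset: Base offset in bytes, added to each file's offset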
+ wr_offset: Offset to write to (and offset per-file offsets by)
+ sector_size: Device sector size (default: 512)
+ **kwargs:
+ file: A path to the file to write (combined with source_root)
+ offset_sectors: An offset to write to in sectors (optional)
+ offset_bytes: An offset to write to in bytes (optional)
+ '''
+ if 'file' not in kwargs:
+ raise writeexts.ExtensionError('Missing file name or path')
+ self.path = os.path.join(source_root,
+ re.sub('^/', '', kwargs['file']))
+
+ if not os.path.exists(self.path):
+ raise writeexts.ExtensionError('File not found: %s' % self.path)
+ elif os.path.isdir(self.path):
+ raise writeexts.ExtensionError('Can only dd regular files')
+ else:
+ self.size = os.stat(self.path).st_size
+
+ self.offset = start_offset
+ if 'offset_bytes' in kwargs:
+ self.offset += pyfdisk.human_size(kwargs['offset_bytes'])
+ elif 'offset_sectors' in kwargs:
+ self.offset += kwargs['offset_sectors'] * sector_size
+ else:
+ self.offset += wr_offset
+
+ self.skip = pyfdisk.human_size(kwargs.get('skip_bytes', 0))
+ self.count = pyfdisk.human_size(kwargs.get('count_bytes', self.size))
+
+ # Offset of the first free byte after this file (first byte of next)
+ self.next_offset = self.size + self.offset
+
+ def dd(self, location):
+ writeexts.Extension.status(msg='Writing %s at %d bytes' %
+ (self.path, self.offset))
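+ # bs=1 makes seek=, skip= and count= byte-granular, allowing
+ # arbitrary byte offsets at the cost of speed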
+ subprocess.check_call(['dd', 'if=%s' % self.path,
+ 'of=%s' % location, 'bs=1',
+ 'seek=%d' % self.offset,
+ 'skip=%d' % self.skip,
+ 'count=%d' % self.count,
+ 'conv=notrunc'])
+ subprocess.check_call('sync')
diff --git a/old/extensions/pxeboot.check b/old/extensions/pxeboot.check
new file mode 100755
index 00000000..19891482
--- /dev/null
+++ b/old/extensions/pxeboot.check
@@ -0,0 +1,86 @@
+#!/usr/bin/python2
+
+import itertools
+import os
+import subprocess
+import sys
+flatten = itertools.chain.from_iterable
+
+def powerset(iterable):
+ "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
+ s = list(iterable)
+ return flatten(itertools.combinations(s, r) for r in range(len(s)+1))
+
+valid_option_sets = frozenset((
+ ('spawn-novlan', frozenset(('PXEBOOT_DEPLOYER_INTERFACE',))),
+ ('spawn-vlan', frozenset(('PXEBOOT_DEPLOYER_INTERFACE', 'PXEBOOT_VLAN'))),
+ ('existing-dhcp', frozenset(('PXEBOOT_DEPLOYER_INTERFACE',
+ 'PXEBOOT_CONFIG_TFTP_ADDRESS'))),
+ ('existing-server', frozenset(('PXEBOOT_CONFIG_TFTP_ADDRESS',
+ 'PXEBOOT_ROOTFS_RSYNC_ADDRESS'))),
+))
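+# e.g. with only PXEBOOT_DEPLOYER_INTERFACE set, 'spawn-novlan' is the
+# single complete match; adding PXEBOOT_VLAN also completes 'spawn-vlan'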
+valid_modes = frozenset(mode for mode, opt_set in valid_option_sets)
+
+
+def compute_matches(env):
+ complete_matches = set()
+ for mode, opt_set in valid_option_sets:
+ if all(k in env for k in opt_set):
+ complete_matches.add(opt_set)
+ return complete_matches
+
+complete_matches = compute_matches(os.environ)
+
+def word_separate_options(options):
+ assert options
+ s = options.pop(-1)
+ if options:
+ s = '%s and %s' % (', '.join(options), s)
+ return s
+
+
+valid_options = frozenset(flatten(opt_set for (mode, opt_set)
+ in valid_option_sets))
+matched_options = frozenset(o for o in valid_options
+ if o in os.environ)
+if not complete_matches:
+ addable_sets = frozenset(opt_set - matched_options
+ for (mode, opt_set) in valid_option_sets
+ if opt_set - matched_options)
+ print('Please provide %s' % ' or '.join(
+ word_separate_options(list(opt_set))
+ for opt_set in addable_sets if opt_set))
+ sys.exit(1)
+elif len(complete_matches) > 1:
+ removable_sets = frozenset(matched_options - frozenset(os) for os in
+ powerset(matched_options)
+ if len(compute_matches(os)) == 1)
+ print('WARNING: Following options might not be needed: %s' % ' or '.join(
+ word_separate_options(list(opt_set))
+ for opt_set in removable_sets if opt_set))
+
+if 'PXEBOOT_MODE' in os.environ:
+ mode = os.environ['PXEBOOT_MODE']
+else:
+ try:
+ mode, = (mode for (mode, opt_set) in valid_option_sets
+ if all(o in os.environ for o in opt_set))
+
+ except ValueError as e:
+ print ('More than one candidate for PXEBOOT_MODE, please '
+ 'set a value for it. Type `morph help pxeboot.write` for '
+ 'more info')
+ sys.exit(1)
+
+if mode not in valid_modes:
+ print('%s is not a valid PXEBOOT_MODE' % mode)
+ sys.exit(1)
+
+if mode != 'existing-server':
+ with open(os.devnull, 'w') as devnull:
+ if subprocess.call(['systemctl', 'is-active', 'nfs-server'],
+ stdout=devnull) != 0:
+ print ('ERROR: nfs-server.service is not running and is needed '
+ 'for this deployment. Please, run `systemctl start nfs-server` '
+ 'and try `morph deploy` again.')
+ sys.exit(1)
diff --git a/old/extensions/pxeboot.write b/old/extensions/pxeboot.write
new file mode 100644
index 00000000..20e4f6bd
--- /dev/null
+++ b/old/extensions/pxeboot.write
@@ -0,0 +1,756 @@
+#!/usr/bin/python2
+
+
+import collections
+import contextlib
+import errno
+import itertools
+import logging
+import os
+import select
+import signal
+import shutil
+import socket
+import string
+import StringIO
+import subprocess
+import sys
+import tempfile
+import textwrap
+import urlparse
+import warnings
+
+import writeexts
+
+def _int_to_quad_dot(i):
+ return '.'.join((
+ str(i >> 24 & 0xff),
+ str(i >> 16 & 0xff),
+ str(i >> 8 & 0xff),
+ str(i & 0xff)))
+
+
+def _quad_dot_to_int(s):
+ i = 0
+ for octet in s.split('.'):
+ i <<= 8
+ i += int(octet, 10)
+ return i
+
+
+def _netmask_to_prefixlen(mask):
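+ # e.g. mask 0xffffff00 (255.255.255.0) has 24 leading ones -> prefixlen 24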
+ bs = '{:032b}'.format(mask)
+ prefix = bs.rstrip('0')
+ if '0' in prefix:
+ raise ValueError('abnormal netmask: %s' %
+ _int_to_quad_dot(mask))
+ return len(prefix)
+
+
+def _get_routes():
+ routes = []
+ with open('/proc/net/route', 'r') as f:
+ for line in list(f)[1:]:
+ fields = line.split()
+ destination, flags, mask = fields[1], fields[3], fields[7]
+ flags = int(flags, 16)
+ if flags & 2:
+ # default route, ignore
+ continue
+ destination = socket.ntohl(int(destination, 16))
+ mask = socket.ntohl(int(mask, 16))
+ prefixlen = _netmask_to_prefixlen(mask)
+ routes.append((destination, prefixlen))
+ return routes
+
+
+class IPRange(object):
+ def __init__(self, prefix, prefixlen):
+ self.prefixlen = prefixlen
+ mask = (1 << prefixlen) - 1
+ self.mask = mask << (32 - prefixlen)
+ self.prefix = prefix & self.mask
+ @property
+ def bitstring(self):
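+ # e.g. 192.168.0.0/16 -> '1100000010101000'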
+ return ('{:08b}' * 4).format(
+ self.prefix >> 24 & 0xff,
+ self.prefix >> 16 & 0xff,
+ self.prefix >> 8 & 0xff,
+ self.prefix & 0xff
+ )[:self.prefixlen]
+ def startswith(self, other_range):
+ return self.bitstring.startswith(other_range.bitstring)
+
+
+def find_subnet(valid_ranges, invalid_ranges):
+ for vr in valid_ranges:
+ known_subnets = set(ir for ir in invalid_ranges if ir.startswith(vr))
+ prefixlens = set(r.prefixlen for r in known_subnets)
+ prefixlens.add(32 - 2) # need at least 4 addresses in subnet
+ prefixlen = min(prefixlens)
+ if prefixlen <= vr.prefixlen:
+ # valid subnet is full, move on to next
+ continue
+ subnetlen = prefixlen - vr.prefixlen
+ for prefix in (vr.prefix + (subnetid << (32 - prefixlen))
+ for subnetid in xrange(1 << subnetlen)):
+ if any(subnet.prefix == prefix for subnet in known_subnets):
+ continue
+ return prefix, prefixlen
+
+
+def _normalise_macaddr(macaddr):
+ '''pxelinux.0 wants the mac address to be lowercase and - separated'''
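+ # e.g. '00:1A:2B:3C:4D:5E' -> '00-1a-2b-3c-4d-5e'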
+ digits = (c for c in macaddr.lower() if c in string.hexdigits)
+ nibble_pairs = grouper(digits, 2)
+ return '-'.join(''.join(byte) for byte in nibble_pairs)
+
+
+@contextlib.contextmanager
+def executor(target_pid):
+ 'Kills a process if its parent dies'
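+ # The forked helper holds the read end of a pipe: if the parent
+ # exits without writing 'Q' (e.g. it crashed), the helper reads EOF
+ # and SIGKILLs the target process.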
+ read_fd, write_fd = os.pipe()
+ helper_pid = os.fork()
+ if helper_pid == 0:
+ try:
+ os.close(write_fd)
+ while True:
+ rlist, _, _ = select.select([read_fd], [], [])
+ if read_fd in rlist:
+ d = os.read(read_fd, 1)
+ if not d:
+ os.kill(target_pid, signal.SIGKILL)
+ if d in ('', 'Q'):
+ os._exit(0)
+ else:
+ os._exit(1)
+ except BaseException as e:
+ import traceback
+ traceback.print_exc()
+ os._exit(1)
+ os.close(read_fd)
+ yield
+ os.write(write_fd, 'Q')
+ os.close(write_fd)
+
+
+def grouper(iterable, n, fillvalue=None):
+ "Collect data into fixed-length chunks or blocks"
+ # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
+ args = [iter(iterable)] * n
+ return itertools.izip_longest(*args, fillvalue=fillvalue)
+
+
+class PXEBoot(writeexts.WriteExtension):
+ @contextlib.contextmanager
+ def _vlan(self, interface, vlan):
+ viface = '%s.%s' % (interface, vlan)
+ self.status(msg='Creating vlan %(viface)s', viface=viface)
+ subprocess.check_call(['vconfig', 'add', interface, str(vlan)])
+ try:
+ yield viface
+ finally:
+ self.status(msg='Destroying vlan %(viface)s', viface=viface)
+ subprocess.call(['vconfig', 'rem', viface])
+
+ @contextlib.contextmanager
+ def _static_ip(self, iface):
+ valid_ranges = set((
+ IPRange(_quad_dot_to_int('192.168.0.0'), 16),
+ IPRange(_quad_dot_to_int('172.16.0.0'), 12),
+ IPRange(_quad_dot_to_int('10.0.0.0'), 8),
+ ))
+ invalid_ranges = set(IPRange(prefix, prefixlen)
+ for (prefix, prefixlen) in _get_routes())
+ prefix, prefixlen = find_subnet(valid_ranges, invalid_ranges)
+ netaddr = prefix
+ dhcp_server_ip = netaddr + 1
+ client_ip = netaddr + 2
+ broadcast_ip = prefix | ((1 << (32 - prefixlen)) - 1)
+ self.status(msg='Assigning ip address %(ip)s/%(prefixlen)d to '
+ 'iface %(iface)s',
+ ip=_int_to_quad_dot(dhcp_server_ip), prefixlen=prefixlen,
+ iface=iface)
+ subprocess.check_call(['ip', 'addr', 'add',
+ '{}/{}'.format(_int_to_quad_dot(dhcp_server_ip),
+ prefixlen),
+ 'broadcast', _int_to_quad_dot(broadcast_ip),
+ 'scope', 'global',
+ 'dev', iface])
+ try:
+ yield (dhcp_server_ip, client_ip, broadcast_ip)
+ finally:
+ self.status(msg='Removing ip addresses from iface %(iface)s',
+ iface=iface)
+ subprocess.call(['ip', 'addr', 'flush', 'dev', iface])
+
+ @contextlib.contextmanager
+ def _up_interface(self, iface):
+ self.status(msg='Bringing interface %(iface)s up', iface=iface)
+ subprocess.check_call(['ip', 'link', 'set', iface, 'up'])
+ try:
+ yield
+ finally:
+ self.status(msg='Bringing interface %(iface)s down', iface=iface)
+ subprocess.call(['ip', 'link', 'set', iface, 'down'])
+
+ @contextlib.contextmanager
+ def static_ip(self, interface):
+ with self._static_ip(iface=interface) as (host_ip, client_ip,
+ broadcast_ip), \
+ self._up_interface(iface=interface):
+ yield (_int_to_quad_dot(host_ip),
+ _int_to_quad_dot(client_ip),
+ _int_to_quad_dot(broadcast_ip))
+
+ @contextlib.contextmanager
+ def vlan(self, interface, vlan):
+ with self._vlan(interface=interface, vlan=vlan) as viface, \
+ self.static_ip(interface=viface) \
+ as (host_ip, client_ip, broadcast_ip):
+ yield host_ip, client_ip, broadcast_ip
+
+ @contextlib.contextmanager
+ def _tempdir(self):
+ td = tempfile.mkdtemp()
+ print 'Created tempdir:', td
+ try:
+ yield td
+ finally:
+ shutil.rmtree(td, ignore_errors=True)
+
+ @contextlib.contextmanager
+ def _remote_tempdir(self, hostname, template):
+ persist = os.environ.get('PXE_INSTALLER') in ('no', 'False')
+ td = writeexts.ssh_runcmd(
+ hostname, ['mktemp', '-d', template]).strip()
+ try:
+ yield td
+ finally:
+ if not persist:
+ writeexts.ssh_runcmd(hostname, ['find', td, '-delete'])
+
+ def _serve_tftpd(self, sock, host, port, interface, tftproot):
+ self.settings.progname = 'tftp server'
+ self._set_process_name()
+ while True:
+ logging.debug('tftpd waiting for connections')
+ # recvfrom with MSG_PEEK is how you accept UDP connections
+ _, peer = sock.recvfrom(0, socket.MSG_PEEK)
+ conn = sock
+ logging.debug('Connecting socket to peer: ' + repr(peer))
+ conn.connect(peer)
+ # The existing socket is now only serving that peer, so we need to
+ # bind a new UDP socket to the wildcard address, which needs the
+ # port to be in REUSEADDR mode.
+ conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ logging.debug('Binding replacement socket to ' + repr((host, port)))
+ sock.bind((host, port))
+
+ logging.debug('tftpd server handing connection to tftpd')
+ tftpd_serve = ['tftpd', '-rl', tftproot]
+ ret = subprocess.call(args=tftpd_serve, stdin=conn,
+ stdout=conn, stderr=None, close_fds=True)
+ # It's handy to turn off REUSEADDR after the rebinding,
+ # so we can protect against future bind attempts on this port.
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 0)
+ logging.debug('tftpd exited %d' % ret)
+ os._exit(0)
+
+ @contextlib.contextmanager
+ def _spawned_tftp_server(self, tftproot, host_ip, interface, tftp_port=0):
+ # inetd-style launchers tend to bind UDP ports with SO_REUSEADDR,
+ # because they need to have multiple ports bound, one for receiving
+ # all connection attempts on that port, and one for each concurrent
+ # connection with a peer
+ # this makes detecting whether there's a tftpd running difficult, so
+ # we'll instead use an ephemeral port and configure the PXE boot to
+ # use that tftp server for the kernel
+ s = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
+ s.bind((host_ip, tftp_port))
+ host, port = s.getsockname()
+ self.status(msg='Bound listen socket to %(host)s, %(port)s',
+ host=host, port=port)
+ pid = os.fork()
+ if pid == 0:
+ try:
+ self._serve_tftpd(sock=s, host=host, port=port,
+ interface=interface, tftproot=tftproot)
+ except BaseException as e:
+ import traceback
+ traceback.print_exc()
+ os._exit(1)
+ s.close()
+ with executor(pid):
+ try:
+ yield port
+ finally:
+ self.status(msg='Killing tftpd listener pid=%(pid)d',
+ pid=pid)
+ os.kill(pid, signal.SIGKILL)
+
+ @contextlib.contextmanager
+ def tftp_server(self, host_ip, interface, tftp_port=0):
+ with self._tempdir() as tftproot, \
+ self._spawned_tftp_server(tftproot=tftproot, host_ip=host_ip,
+ interface=interface,
+ tftp_port=tftp_port) as tftp_port:
+ self.status(msg='Serving tftp root %(tftproot)s, on port %(port)d',
+ port=tftp_port, tftproot=tftproot)
+ yield tftp_port, tftproot
+
+ @contextlib.contextmanager
+ def _local_copy(self, src, dst):
+ self.status(msg='Installing %(src)s to %(dst)s',
+ src=src, dst=dst)
+ shutil.copy2(src=src, dst=dst)
+ try:
+ yield
+ finally:
+ self.status(msg='Removing %(dst)s', dst=dst)
+ os.unlink(dst)
+
+ @contextlib.contextmanager
+ def _local_symlink(self, src, dst):
+ os.symlink(src, dst)
+ try:
+ yield
+ finally:
+ os.unlink(dst)
+
+ def local_pxelinux(self, tftproot):
+ return self._local_copy('/usr/share/syslinux/pxelinux.0',
+ os.path.join(tftproot, 'pxelinux.0'))
+
+ def local_kernel(self, rootfs, tftproot):
+ return self._local_copy(os.path.join(rootfs, 'boot/vmlinuz'),
+ os.path.join(tftproot, 'kernel'))
+
+ @contextlib.contextmanager
+ def _remote_copy(self, hostname, src, dst):
+ persist = os.environ.get('PXE_INSTALLER') in ('no', 'False')
+ with open(src, 'r') as f:
+ writeexts.ssh_runcmd(hostname,
+ ['install', '-D', '-m644',
+ '/proc/self/fd/0', dst],
+ stdin=f, stdout=None, stderr=None)
+ try:
+ yield
+ finally:
+ if not persist:
+ writeexts.ssh_runcmd(hostname, ['rm', dst])
+
+ @contextlib.contextmanager
+ def _remote_symlink(self, hostname, src, dst):
+ persist = os.environ.get('PXE_INSTALLER') in ('no', 'False')
+ writeexts.ssh_runcmd(hostname,
+ ['ln', '-s', '-f', src, dst],
+ stdin=None, stdout=None, stderr=None)
+ try:
+ yield
+ finally:
+ if not persist:
+ writeexts.ssh_runcmd(hostname, ['rm', '-f', dst])
+
+ @contextlib.contextmanager
+ def remote_kernel(self, rootfs, tftp_url, macaddr):
+ for name in ('vmlinuz', 'zImage', 'uImage'):
+ kernel_path = os.path.join(rootfs, 'boot', name)
+ if os.path.exists(kernel_path):
+ break
+ else:
+ raise writeexts.ExtensionError('Failed to locate kernel')
+ url = urlparse.urlsplit(tftp_url)
+ basename = '{}-kernel'.format(_normalise_macaddr(macaddr))
+ target_path = os.path.join(url.path, basename)
+ with self._remote_copy(hostname=url.hostname, src=kernel_path,
+ dst=target_path):
+ yield basename
+
+ @contextlib.contextmanager
+ def remote_fdt(self, rootfs, tftp_url, macaddr):
+ fdt_rel_path = os.environ.get('DTB_PATH', '')
+ if fdt_rel_path == '':
+ yield
+ return
+ fdt_abs_path = os.path.join(rootfs, fdt_rel_path)
+ if not os.path.exists(fdt_abs_path):
+ raise writeexts.ExtensionError(
+ 'Failed to locate Flattened Device Tree')
+ url = urlparse.urlsplit(tftp_url)
+ basename = '{}-fdt'.format(_normalise_macaddr(macaddr))
+ target_path = os.path.join(url.path, basename)
+ with self._remote_copy(hostname=url.hostname, src=fdt_abs_path,
+ dst=target_path):
+ yield basename
+
+ @contextlib.contextmanager
+ def local_nfsroot(self, rootfs, target_ip):
+ nfsroot = target_ip + ':' + rootfs
+ self.status(msg='Exporting %(nfsroot)s as local nfsroot',
+ nfsroot=nfsroot)
+ subprocess.check_call(['exportfs', '-o', 'ro,insecure,no_root_squash',
+ nfsroot])
+ try:
+ yield
+ finally:
+ self.status(msg='Removing %(nfsroot)s from local nfsroots',
+ nfsroot=nfsroot)
+ subprocess.check_call(['exportfs', '-u', nfsroot])
+
+ @contextlib.contextmanager
+ def remote_nfsroot(self, rootfs, rsync_url, macaddr):
+ url = urlparse.urlsplit(rsync_url)
+ template = os.path.join(url.path,
+ _normalise_macaddr(macaddr) + '.XXXXXXXXXX')
+ with self._remote_tempdir(hostname=url.hostname, template=template) \
+ as tempdir:
+ nfsroot = urlparse.urlunsplit((url.scheme, url.netloc, tempdir,
+ url.query, url.fragment))
+ subprocess.check_call(['rsync', '-asSPH', '--delete',
+ rootfs, nfsroot],
+ stdin=None, stdout=open(os.devnull, 'w'),
+ stderr=None)
+ yield os.path.join(os.path.basename(tempdir),
+ os.path.basename(rootfs))
+
+ @staticmethod
+ def _write_pxe_config(fh, kernel_tftp_url, rootfs_nfs_url, device=None,
+ fdt_subpath=None, extra_args=''):
+
+ if device is None:
+ ip_cfg = "ip=dhcp"
+ else:
+ ip_cfg = "ip=:::::{device}:dhcp::".format(device=device)
+
+ fh.write(textwrap.dedent('''\
+ DEFAULT default
+ LABEL default
+ LINUX {kernel_url}
+ APPEND root=/dev/nfs {ip_cfg} nfsroot={rootfs_nfs_url} {extra_args}
+ ''').format(kernel_url=kernel_tftp_url, ip_cfg=ip_cfg,
+ rootfs_nfs_url=rootfs_nfs_url, extra_args=extra_args))
+ if fdt_subpath is not None:
+ fh.write("FDT {}\n".format(fdt_subpath))
+ fh.flush()
+
+ @contextlib.contextmanager
+ def local_pxeboot_config(self, tftproot, macaddr, ip, tftp_port,
+ nfsroot_dir, device=None):
+ kernel_tftp_url = 'tftp://{}:{}/kernel'.format(ip, tftp_port)
+ rootfs_nfs_url = '{}:{}'.format(ip, nfsroot_dir)
+ pxe_cfg_filename = _normalise_macaddr(macaddr)
+ pxe_cfg_path = os.path.join(tftproot, 'pxelinux.cfg', pxe_cfg_filename)
+ os.makedirs(os.path.dirname(pxe_cfg_path))
+ with open(pxe_cfg_path, 'w') as f:
+ self._write_pxe_config(fh=f, kernel_tftp_url=kernel_tftp_url,
+ rootfs_nfs_url=rootfs_nfs_url,
+ device=device,
+ extra_args=os.environ.get('KERNEL_ARGS',''))
+
+ try:
+ with self._local_symlink(
+ src=pxe_cfg_filename,
+ dst=os.path.join(tftproot,
+ 'pxelinux.cfg',
+ '01-' + pxe_cfg_filename)):
+ yield
+ finally:
+ os.unlink(pxe_cfg_path)
+
+ @contextlib.contextmanager
+ def remote_pxeboot_config(self, tftproot, kernel_tftproot, kernel_subpath,
+ fdt_subpath, rootfs_nfsroot, rootfs_subpath,
+ macaddr):
+ rootfs_nfs_url = '{}/{}'.format(rootfs_nfsroot, rootfs_subpath)
+ url = urlparse.urlsplit(kernel_tftproot)
+ kernel_tftp_url = '{}:{}'.format(url.netloc, kernel_subpath)
+ pxe_cfg_filename = _normalise_macaddr(macaddr)
+ url = urlparse.urlsplit(tftproot)
+ inst_cfg_path = os.path.join(url.path, 'pxelinux.cfg')
+ with tempfile.NamedTemporaryFile() as f:
+ self._write_pxe_config(
+ fh=f, kernel_tftp_url=kernel_tftp_url,
+ fdt_subpath=fdt_subpath,
+ rootfs_nfs_url=rootfs_nfs_url,
+ extra_args=os.environ.get('KERNEL_ARGS',''))
+ with self._remote_copy(
+ hostname=url.hostname, src=f.name,
+ dst=os.path.join(inst_cfg_path,
+ pxe_cfg_filename)), \
+ self._remote_symlink(
+ hostname=url.hostname,
+ src=pxe_cfg_filename,
+ dst=os.path.join(inst_cfg_path,
+ '01-' + pxe_cfg_filename)):
+ yield
+
+ @contextlib.contextmanager
+ def dhcp_server(self, interface, host_ip, target_ip, broadcast_ip):
+ with self._tempdir() as td:
+ leases_path = os.path.join(td, 'leases')
+ config_path = os.path.join(td, 'config')
+ stdout_path = os.path.join(td, 'stdout')
+ stderr_path = os.path.join(td, 'stderr')
+ pidfile_path = os.path.join(td, 'pid')
+ with open(config_path, 'w') as f:
+ f.write(textwrap.dedent('''\
+ start {target_ip}
+ end {target_ip}
+ interface {interface}
+ max_leases 1
+ lease_file {leases_path}
+ pidfile {pidfile_path}
+ boot_file pxelinux.0
+ option dns {host_ip}
+ option broadcast {broadcast_ip}
+ ''').format(**locals()))
+ with open(stdout_path, 'w') as stdout, \
+ open(stderr_path, 'w') as stderr:
+ sp = subprocess.Popen(['udhcpd', '-f', config_path], cwd=td,
+ stdin=open(os.devnull), stdout=stdout,
+ stderr=stderr)
+ try:
+ with executor(sp.pid):
+ yield
+ finally:
+ sp.terminate()
+
+ def get_interface_ip(self, interface):
+ ip_addresses = []
+ info = subprocess.check_output(['ip', '-o', '-f', 'inet', 'addr',
+ 'show', interface]).rstrip('\n')
+ if info:
+ tokens = collections.deque(info.split()[1:])
+ ifname = tokens.popleft()
+ while tokens:
+ tok = tokens.popleft()
+ if tok == 'inet':
+ address = tokens.popleft()
+ address, netmask = address.split('/')
+ ip_addresses.append(address)
+ elif tok == 'brd':
+ tokens.popleft() # not interested in broadcast address
+ elif tok == 'scope':
+ tokens.popleft() # not interested in scope tag
+ else:
+ continue
+ if not ip_addresses:
+ raise writeexts.ExtensionError('Interface %s has no addresses'
+ % interface)
+ if len(ip_addresses) > 1:
+ warnings.warn('Interface %s has multiple addresses, '
+ 'using first (%s)' % (interface, ip_addresses[0]))
+ return ip_addresses[0]
+
+ def ipmi_set_target_vlan(self):
+ if any(env_var.startswith('IPMI_') for env_var in os.environ):
+ # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST and PXEBOOT_VLAN
+ default = textwrap.dedent('''\
+ ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\
+ lan set 1 vlan id "$PXEBOOT_VLAN"
+ ''')
+ else:
+ default = textwrap.dedent('''\
+ while true; do
+ echo Please set the target\\'s vlan to $PXEBOOT_VLAN, \\
+ then enter \\"vlanned\\"
+ read
+ if [ "$REPLY" = vlanned ]; then
+ break
+ fi
+ done
+ ''')
+ command = os.environ.get('PXEBOOT_SET_VLAN_COMMAND', default)
+ subprocess.check_call(['sh', '-euc', command, '-'])
+
+ def ipmi_pxe_reboot_target(self):
+ if any(env_var.startswith('IPMI_') for env_var in os.environ):
+ # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST and PXEBOOT_VLAN
+ default = textwrap.dedent('''\
+ set -- ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST"
+ "$@" chassis bootdev pxe
+ "$@" chassis power reset
+ ''')
+ else:
+ default = textwrap.dedent('''\
+ while true; do
+ echo Please reboot the target in PXE mode, then\\
+ enter \\"pxe-booted\\"
+ read
+ if [ "$REPLY" = pxe-booted ]; then
+ break
+ fi
+ done
+ ''')
+ command = os.environ.get('PXEBOOT_PXE_REBOOT_COMMAND', default)
+ subprocess.check_call(['sh', '-euc', command, '-'])
+
+ def wait_for_target_to_install(self):
+ command = os.environ.get(
+ 'PXEBOOT_WAIT_INSTALL_COMMAND',
+ textwrap.dedent('''\
+ while true; do
+ echo Please wait for the system to install, then \\
+ enter \\"installed\\"
+ read
+ if [ "$REPLY" = installed ]; then
+ break
+ fi
+ done
+ '''))
+ subprocess.check_call(['sh', '-euc', command, '-'])
+
+ def ipmi_unset_target_vlan(self):
+ if any(env_var.startswith('IPMI_') for env_var in os.environ):
+ # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST
+ default = textwrap.dedent('''\
+ ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\
+ lan set 1 vlan id off
+ ''')
+ else:
+ default = textwrap.dedent('''\
+ while true; do
+ echo Please reset the target\\'s vlan, \\
+ then enter \\"unvlanned\\"
+ read
+ if [ "$REPLY" = unvlanned ]; then
+ break
+ fi
+ done
+ ''')
+ command = os.environ.get('PXEBOOT_UNSET_VLAN_COMMAND', default)
+ subprocess.check_call(['sh', '-euc', command, '-'])
+
+ def ipmi_reboot_target(self):
+ if any(env_var.startswith('IPMI_') for env_var in os.environ):
+ # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST
+ default = textwrap.dedent('''\
+ ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\
+ chassis power reset
+ ''')
+ else:
+ default = textwrap.dedent('''\
+ while true; do
+ echo Please reboot the target, then\\
+ enter \\"rebooted\\"
+ read
+ if [ "$REPLY" = rebooted ]; then
+ break
+ fi
+ done
+ ''')
+ command = os.environ.get('PXEBOOT_REBOOT_COMMAND', default)
+ subprocess.check_call(['sh', '-euc', command, '-'])
+
+ def process_args(self, (temp_root, macaddr)):
+ interface = os.environ.get('PXEBOOT_DEPLOYER_INTERFACE', None)
+ target_interface = os.environ.get('PXEBOOT_TARGET_INTERFACE', None)
+ vlan = os.environ.get('PXEBOOT_VLAN')
+ if vlan is not None: vlan = int(vlan)
+ mode = os.environ.get('PXEBOOT_MODE')
+ if mode is None:
+ if interface:
+ if vlan is not None:
+ mode = 'spawn-vlan'
+ else:
+ if 'PXEBOOT_CONFIG_TFTP_ADDRESS' in os.environ:
+ mode = 'existing-dhcp'
+ else:
+ mode = 'spawn-novlan'
+ else:
+ mode = 'existing-server'
+ assert mode in ('spawn-vlan', 'spawn-novlan', 'existing-dhcp',
+ 'existing-server')
+ if mode == 'spawn-vlan':
+ with self.vlan(interface=interface, vlan=vlan) \
+ as (host_ip, target_ip, broadcast_ip), \
+ self.tftp_server(host_ip=host_ip, interface=interface) \
+ as (tftp_port, tftproot), \
+ self.local_pxelinux(tftproot=tftproot), \
+ self.local_kernel(rootfs=temp_root, tftproot=tftproot), \
+ self.local_nfsroot(rootfs=temp_root, target_ip=target_ip), \
+ self.local_pxeboot_config(tftproot=tftproot, macaddr=macaddr,
+ device=target_interface,
+ ip=host_ip, tftp_port=tftp_port,
+ nfsroot_dir=temp_root), \
+ self.dhcp_server(interface=interface, host_ip=host_ip,
+ target_ip=target_ip,
+ broadcast_ip=broadcast_ip):
+ self.ipmi_set_target_vlan()
+ self.ipmi_pxe_reboot_target()
+ self.wait_for_target_to_install()
+ self.ipmi_unset_target_vlan()
+ self.ipmi_reboot_target()
+ elif mode == 'spawn-novlan':
+ with self.static_ip(interface=interface) as (host_ip, target_ip,
+ broadcast_ip), \
+ self.tftp_server(host_ip=host_ip, interface=interface,
+ tftp_port=69) \
+ as (tftp_port, tftproot), \
+ self.local_pxelinux(tftproot=tftproot), \
+ self.local_kernel(rootfs=temp_root, tftproot=tftproot), \
+ self.local_nfsroot(rootfs=temp_root, target_ip=target_ip), \
+ self.local_pxeboot_config(tftproot=tftproot, macaddr=macaddr,
+ device=target_interface,
+ ip=host_ip, tftp_port=tftp_port,
+ nfsroot_dir=temp_root), \
+ self.dhcp_server(interface=interface, host_ip=host_ip,
+ target_ip=target_ip,
+ broadcast_ip=broadcast_ip):
+ self.ipmi_pxe_reboot_target()
+ self.wait_for_target_to_install()
+ self.ipmi_reboot_target()
+ elif mode == 'existing-dhcp':
+ ip = self.get_interface_ip(interface)
+ config_tftpaddr = os.environ['PXEBOOT_CONFIG_TFTP_ADDRESS']
+ with self.tftp_server(host_ip=ip, interface=interface,
+ tftp_port=69) \
+ as (tftp_port, tftproot), \
+ self.local_kernel(rootfs=temp_root, tftproot=tftproot), \
+ self.local_nfsroot(rootfs=temp_root, target_ip=''):
+ kernel_tftproot = 'tftp://{}:{}/'.format(ip, tftp_port)
+ rootfs_nfsroot = '{}:{}'.format(ip, temp_root)
+ with self.remote_pxeboot_config(
+ tftproot=config_tftpaddr,
+ kernel_tftproot=kernel_tftproot,
+ kernel_subpath='kernel',
+ rootfs_nfsroot=rootfs_nfsroot,
+ rootfs_subpath='',
+ macaddr=macaddr):
+ self.ipmi_pxe_reboot_target()
+ self.wait_for_target_to_install()
+ self.ipmi_reboot_target()
+ elif mode == 'existing-server':
+ config_tftpaddr = os.environ['PXEBOOT_CONFIG_TFTP_ADDRESS']
+ kernel_tftpaddr = os.environ.get('PXEBOOT_KERNEL_TFTP_ADDRESS',
+ config_tftpaddr)
+ url = urlparse.urlsplit(kernel_tftpaddr)
+ kernel_tftproot = os.environ.get('PXEBOOT_KERNEL_TFTP_ROOT',
+ 'tftp://%s/%s' % (url.hostname,
+ url.path))
+ rootfs_rsync = os.environ['PXEBOOT_ROOTFS_RSYNC_ADDRESS']
+ url = urlparse.urlsplit(rootfs_rsync)
+ nfsroot = os.environ.get('PXEBOOT_ROOTFS_NFSROOT',
+ '%s:%s' % (url.hostname, url.path))
+ with self.remote_kernel(rootfs=temp_root, tftp_url=kernel_tftpaddr,
+ macaddr=macaddr) as kernel_subpath, \
+ self.remote_fdt(rootfs=temp_root, tftp_url=kernel_tftpaddr,
+ macaddr=macaddr) as fdt_subpath, \
+ self.remote_nfsroot(rootfs=temp_root, rsync_url=rootfs_rsync, \
+ macaddr=macaddr) as rootfs_subpath, \
+ self.remote_pxeboot_config(tftproot=config_tftpaddr,
+ kernel_tftproot=kernel_tftproot,
+ kernel_subpath=kernel_subpath,
+ fdt_subpath=fdt_subpath,
+ rootfs_nfsroot=nfsroot,
+ rootfs_subpath=rootfs_subpath,
+ macaddr=macaddr):
+ persist = os.environ.get('PXE_INSTALLER') in ('no', 'False')
+ if not persist:
+ self.ipmi_pxe_reboot_target()
+ self.wait_for_target_to_install()
+ self.ipmi_reboot_target()
+ else:
+ raise writeexts.ExtensionError('Invalid PXEBOOT_MODE: %s' % mode)
+
+PXEBoot().run()
diff --git a/old/extensions/pxeboot.write.help b/old/extensions/pxeboot.write.help
new file mode 100644
index 00000000..7cb78bce
--- /dev/null
+++ b/old/extensions/pxeboot.write.help
@@ -0,0 +1,166 @@
+help: >
+ pxeboot.write extension.
+
+
+ This write extension will serve your generated system over NFS to
+ the target system.
+
+ In all modes `location` is the mac address of the interface that
+ the target will PXE boot from. This is used so that the target will
+ load the configuration file appropriate to it.
+
+
+ # `PXEBOOT_MODE`
+
+
+ It has 4 modes, which can be specified with PXEBOOT_MODE, or inferred
+ from which parameters are passed:
+
+
+ ## spawn-vlan
+
+
+ Specify PXEBOOT_DEPLOYER_INTERFACE and PXEBOOT_VLAN to configure
+ the target to pxeboot on a vlan and spawn a dhcp, nfs and tftp
+ server. This is potentially the fastest, since it doesn't need to
+ copy data to other servers.
+
+ This will create a vlan interface for the interface specified in
+ PXEBOOT_DEPLOYER_INTERFACE and spawn a dhcp server which serves
+ pxelinux.0, a configuration file and a kernel image from itself.
+
+ The configuration file informs the target to boot with a kernel
+ command-line that uses an NFS root served from the deployment host.
+
+
+ ## spawn-novlan
+
+
+ Specify PXEBOOT_DEPLOYER_INTERFACE without PXEBOOT_VLAN to configure
+ like `spawn-vlan`, but without creating the vlan interface.
+
+ This assumes that you have exclusive access to the interface, such
+ as if you're plugged in to the device directly, or your interface
+ is vlanned by your infrastructure team.
+
+ This is required if you are serving from a VM and bridging it to the
+ correct network via macvtap. For this to work, you need to macvtap
+ bridge to a pre-vlanned interface on your host machine.
+
+
+ ## existing-dhcp
+
+
+ Specify PXEBOOT_DEPLOYER_INTERFACE and PXEBOOT_CONFIG_TFTP_ADDRESS
+ to put config on an existing tftp server, already configured by the
+ dhcp server.
+
+ This spawns a tftp server and configures the local nfs server, but
+ doesn't spawn a dhcp server. This is useful if you have already got a
+ dhcp server that serves PXE images.
+
+ PXEBOOT_CONFIG_TFTP_ADDRESS is a URL in the form `sftp://$HOST/$PATH`.
+ The configuration file is copied to `$PATH/pxelinux.cfg/` on the
+ target identified by `$HOST`.
+
+
+ ## existing-server
+
+
+ Specify at least PXEBOOT_CONFIG_TFTP_ADDRESS and
+ PXEBOOT_ROOTFS_RSYNC_ADDRESS to specify existing servers to copy
+ config, kernels and the rootfs to.
+
+ Configuration is copied to the target as in `existing-dhcp`.
+
+ Specify PXEBOOT_KERNEL_TFTP_ADDRESS if the tftp server that the
+ kernel must be downloaded from is different to that of the pxelinux
+ configuration file.
+
+ PXEBOOT_ROOTFS_RSYNC_ADDRESS is an rsync URL describing where to copy
+ nfsroots, from where they will be exported by the NFS server.
+
+ Specify PXEBOOT_ROOTFS_NFSROOT if the nfsroot appears as a different
+ address from the target's perspective.
+
+
+ # IPMI commands
+
+
+ After the PXE boot has been set up, the target needs to be rebooted
+ in PXE mode.
+
+ If the target is IPMI enabled, you can set `IPMI_USER`, `IPMI_HOST`
+ and `IPMI_PASSWORD` to make it reboot the target into netboot mode
+ automatically.
+
+ If they are not specified, then instructions will be displayed, and
+ `pxeboot.write` will wait for you to finish.
+
+ If there are command-line automation tools for rebooting the target
+ in netboot mode, then appropriate commands can be defined in the
+ following variables.
+
+
+ ## PXEBOOT_PXE_REBOOT_COMMAND
+
+
+ This command will be used to reboot the target device with its boot
+ device set to PXE boot.
+
+
+ ## PXEBOOT_REBOOT_COMMAND
+
+
+ This command will be used to reboot the target device in its default
+ boot mode.
+
+
+ ## PXEBOOT_WAIT_INSTALL_COMMAND
+
+
+ If it is possible for the target to notify you that it has finished
+ installing, you can put a command in here to wait for the event.
+
+
+ # Misc
+
+
+ ## KERNEL_ARGS
+
+
+ Additional kernel command line options. Note that the following
+ options
+
+ root=/dev/nfs ip=dhcp nfsroot=$NFSROOT
+
+ are implicitly added by the extension.
+
+
+ ## DTB_PATH
+
+
+ Location in the deployed root filesystem of the Flattened Device
+ Tree blob (FDT) to use.
+
+
+ ## PXE_INSTALLER
+
+
+ If set to `no`, `False` or any other YAML value for false, the
+ remotely installed rootfs, kernel, bootloader config file and
+ device tree blob (if specified) will not be removed after the
+ deployment finishes. This variable is only meaningful in the
+ `existing-server` mode.
+
+
+ ## PXEBOOT_TARGET_INTERFACE
+
+ Name of the interface of the target to pxeboot from. Some targets
+ with more than one interface try to get the rootfs from a different
+ interface than the interface from where the pxeboot server is
+ reachable. Using this variable, the kernel arguments will be filled
+ to include the device.
+
+ Note that the name of this interface is the kernel's default name,
+ usually called ethX, and is non-deterministic.
diff --git a/old/extensions/pyfdisk.README b/old/extensions/pyfdisk.README
new file mode 100644
index 00000000..8b3b941b
--- /dev/null
+++ b/old/extensions/pyfdisk.README
@@ -0,0 +1,144 @@
+Introduction
+============
+
+The pyfdisk.py module provides a basic Python wrapper around command-line
+fdisk from util-linux, and some assorted related functions for querying
+information from real disks or disk images.
+
+
+YAML partition specification
+============================
+
+A YAML file may be loaded, using the function load_yaml(). This can contain
+all the information needed to create a Device object which can then be
+committed to disk.
+
+The format of this file is as follows:
+
+ start_offset: 2048
+ partition_table_format: gpt
+ partitions:
+ - description: boot
+ size: 1M
+ fdisk_type: 0x0B
+ filesystem: vfat
+ boot: yes
+ mountpoint: /boot
+ - description: rootfs
+ number: 3
+ size: 10G
+ filesystem: btrfs
+ fdisk_type: 0x83
+ mountpoint: /
+ - description: src
+ size: fill
+ filesystem: ext4
+ fdisk_type: 0x81
+ mountpoint: /src
+
+There are a couple of global attributes:
+
+* 'start_offset': specifies the start sector of the first partition on the
+ device (default: 2048)
+
+* 'partition_table_format': specifies the partition table format to be used
+ when creating the partition table. Possible format
+ strings are 'gpt', 'dos', or 'mbr' ('dos' and
+ 'mbr' are interchangeable). (default: gpt)
+
+Following this, up to 4 (for MBR) or 128 (for GPT) partitions can be
+specified, in the list, 'partitions'. For partitions, 'size', 'fdisk_type' and
+'filesystem' are required.
+
+* 'size' is the size in bytes, or 'fill', which will expand the partition to
+ fill any unused space. Multiple partitions with 'size: fill' will share the
+ free space on the device. Human readable formatting can be used: K, M, G, T,
+ for integer multiples (calculated as binary powers, e.g. K = 2^10)
+
+* 'fdisk_type' is the fdisk partition type, specified as a hexadecimal value
+ (default: 0x81)
+
+* 'filesystem' specifies a filesystem to be created on the partition. It can
+ be any filesystem with an associated mkfs.* tool, or 'none' for an
+ unformatted partition.
+
+Optional partition attributes include:
+
+* 'number' is optional, and can be used to override the numbering of
+ partitions, if it is desired to have partition numbering that differs from
+ the physical order of the partitions on the disk.
+ - For all un-numbered partitions, the physical order of partitions on the
+ device is determined by the order in which they appear in the
+ specification.
+ - For any partitions without a specified number, partition numbering is
+ handled automatically. In the example above, /boot is 1, /src is 2,
+ and / is 3, even though the physical order differs.
+
+* 'boot' sets the partition's bootable flag (currently only for MBR partition
+ tables)
+
+* 'mountpoint' specifies a mountpoint of a partition. One partition must
+ have a '/' mountpoint to contain the rootfs, otherwise this is optional.
+ Files present in the rootfs under the mount point for a given partition will
+ be copied to the created partition.
+
+load_yaml() produces a Device object, populated with any partitions contained
+in the specification.
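+
+For example, partitioning.py drives this flow roughly as follows
+('disk.img' and 'spec.yaml' are placeholder names):
+
+ >>> dev = pyfdisk.load_yaml('disk.img', '1G', 'spec.yaml')
+ >>> dev.commit()
+ >>> dev.create_filesystems(skip=('/',))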
+
+
+Objects
+=======
+
+Partition - An object containing properties of a partition
+
+Device - An object holding information about a physical device, and the
+ overall properties of the partitioning scheme. It contains a
+ PartitionList holding the partitions on the device.
+
+PartitionList - An object which holds a list of partitions on the disk. New
+ partitions can be added to the list at any time. When the list
+ is queried, properties of partitions which depend on the
+ properties of the other partitions in the list, for example
+ the size of a fill partition, or numbering, are recalculated,
+ and an updated copy of a Partition object is returned.
+
+Extent - An object which helps encapsulate sector dimensions for partitions
+ and devices.
+
+
+Quick start
+===========
+
+ >>> dev = pyfdisk.Device('test.img', 'fill')
+ >>> print dev
+ <Device: location=test.img, size=16777216, partitions: 0>
+ >>> part = pyfdisk.Partition(size='1M', fdisk_type=0x81, filesystem='ext4', mountpoint='/test1')
+ >>> part2 = pyfdisk.Partition(size='fill', filesystem='btrfs', mountpoint='/test2')
+ >>> dev.add_partition(part)
+ >>> dev.add_partition(part2)
+ >>> print dev.partitionlist
+ Partition
+ size: 14663168
+ fdisk type: 0x81
+ filesystem: btrfs
+ start: 4096
+ end: 32734
+ number: 2
+ mountpoint: /test2
+ Partition
+ size: 1048576
+ fdisk type: 0x81
+ filesystem: ext4
+ start: 2048
+ end: 4095
+ number: 1
+ mountpoint: /test1
+ >>> dev.commit()
+ Creating GPT partition table on test.img
+
+ $ fdisk -l test.img
+ Disk test.img: 16 MiB, 16777216 bytes, 32768 sectors
+ ...
+ Device Start End Sectors Size Type
+ test.img1 2048 4095 2048 1M Linux filesystem
+ test.img2 4096 32734 28639 14M Linux filesystem
diff --git a/old/extensions/pyfdisk.py b/old/extensions/pyfdisk.py
new file mode 100644
index 00000000..a7796729
--- /dev/null
+++ b/old/extensions/pyfdisk.py
@@ -0,0 +1,769 @@
+#!/usr/bin/env python2
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+A simple Python wrapper for fdisk
+
+ * Intends to have as few dependencies as possible, beyond command line fdisk
+ * Intends to work on Linux, though may work on other operating systems with
+ fdisk from util-linux.
+ * Provides for the creation of MBR and GPT partitioned images or devices
+ * Includes some utility functions for reading information from existing
+ partition tables
+
+Caveats:
+ * Designed to cater for disks using 4096 byte sectors, although this hasn't
+ been tested yet.
+"""
+
+import contextlib
+from copy import deepcopy
+import re
+import subprocess
+import time
+import yaml
+
+
+class Extent(object):
+ """
+ A class to hold start and end points for other objects
+
+ Start and end points are measured in sectors. This class transparently
+ handles the inclusive nature of the start and end sectors of blocks of
+ storage. It also allows extents to be aligned within other extents.
+ """
+
+ def __init__(self, start=0, length=0, end=0):
+ if length and not start:
+ raise PartitioningError('Extent requires a non-zero start '
+ 'point and length')
+ if start and length:
+ self.start = int(start)
+ self.end = int(start) + int(length) - 1
+ else:
+ self.start = int(start)
+ self.end = int(end)
+
+ self.filled_sectors = 0
+
+ def __max__(self):
+ return self.end
+
+ def __min__(self):
+ return self.start
+
+ def __len__(self):
+ return self.end - self.start + 1
+
+ def __add__(self, other):
+ """Return the sum of two extents"""
+ return Extent(start=self.start,
+ length=(len(self) + len(other)))
+
+ def __iadd__(self, other):
+ """+="""
+ self.end += len(other)
+ return self
+
+ def __gt__(self, other):
+ return len(self) > len(other)
+
+ def __lt__(self, other):
+        return len(self) < len(other)
+
+ def __str__(self):
+ return ('<Extent: Start=%d, End=%d, Length=%d>' %
+ (self.start, self.end, len(self)))
+
+ def pack(self, other):
+ """
+ Return a new Extent aligned to self's first unused sector
+
+ This is done by length, to quantify fitting an area of disk space
+ inside the other. The filled space in self is calculated and updated.
+
+ Returns:
+ A new Extent, starting at the first available sector in `self`,
+ with the same length as `other`.
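+
+        Example (illustrative):
+            >>> dev = Extent(start=2048, end=32767)
+            >>> print dev.pack(Extent(start=1, length=2048))
+            <Extent: Start=2048, End=4095, Length=2048>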
+ """
+ length_other = len(other)
+ first_free_sector = self.start + self.filled_sectors
+ if length_other + self.filled_sectors > len(self):
+ raise PartitioningError('Not enough free space to pack Extent')
+ self.filled_sectors += length_other
+ return Extent(start=first_free_sector, length=length_other)
+
+ def free_sectors(self):
+ return len(self) - self.filled_sectors
+
+
+class PartitionList(object):
+ """
+ An iterable object to contain and process a list of Partition objects
+
+ This class eases the calculation of partition sizes and numbering, since
+ the properties of a given partition depend on each of the other partitions
+ in the list.
+
+ Attributes:
+ device: A Device class containing the partition list
+ """
+
+ def __init__(self, device):
+ """
+ Initialisation function
+
+ Args:
+ device: A Device object
+ """
+ self.device = device
+ self.extent = device.extent
+
+ self.__cached_list_hash = 0
+
+ self.__partition_list = []
+ self.__iter_index = 0
+
+ def append(self, partition):
+ """Append a new Partition object to the list"""
+ partition.check()
+ if isinstance(partition, Partition):
+ for part in self.__partition_list:
+ dup_attrib = part.compare(partition)
+ if dup_attrib:
+ raise PartitioningError('Duplicated partition attribute '
+ '\'%s\'' % dup_attrib)
+ self.__partition_list.append(partition)
+ else:
+ raise PartitioningError('PartitionList can only '
+ 'contain Partition objects')
+
+ def __iter__(self):
+ """Return a copy of self as an iterable object"""
+ self.__iter_index = 0
+ copy = deepcopy(self)
+ return copy
+
+ def __next__(self):
+ """Return the next item in an iteration"""
+ if self.__iter_index == len(self.__partition_list):
+ raise StopIteration
+ else:
+ partition = self[self.__iter_index]
+ self.__iter_index += 1
+ return partition
+
+ def next(self):
+ """Provide a next() method for Python 2 compatibility"""
+ return self.__next__()
+
+ def __getitem__(self, i):
+ """Return an partition from the list, sorted by partition number"""
+ part_list = sorted(self.__update_partition_list(),
+ key=lambda part: part.number)
+ return part_list[i]
+
+ def free_sectors(self):
+ """Calculate the amount of unused space in the list"""
+ part_list = self.__update_partition_list()
+ self.extent.filled_sectors = 0
+ for part in part_list:
+ self.extent.pack(part.extent)
+ return self.extent.free_sectors()
+
+ def __update_partition_list(self):
+ """
+ Allocate extent and numbering for each Partition object in the list
+
+ A copy of the partition list is made so that any Partition object
+ returned from this list is a copy of a stored Partition object, thus
+ any partitions stored in the partition list remain intact even if a
+        copy is modified after it is returned. Hashing is used to avoid
+ updating the list when the partition list has not changed.
+ """
+ current_list_hash = hash(str(self.__partition_list))
+ if current_list_hash == self.__cached_list_hash:
+ return self.__cached_list
+
+ part_list = deepcopy(self.__partition_list)
+ used_numbers = set()
+ fill_partitions = set(partition for partition in part_list
+ if partition.size == 'fill')
+ requested_numbers = set(partition.number for partition in part_list
+ if hasattr(partition, 'number'))
+
+ # Get free space and the size of 'fill' partitions
+ self.extent.filled_sectors = 0
+ for part in part_list:
+ if part.size != 'fill':
+ extent = Extent(start=1,
+ length=self.get_length_sectors(part.size))
+ part.extent = extent
+ self.extent.pack(extent)
+
+ # Allocate aligned Extents and process partition numbers
+ if len(fill_partitions):
+ fill_size = self.extent.free_sectors() / len(fill_partitions)
+ # Set size of fill partitions
+ for part in fill_partitions:
+ part.size = fill_size * self.device.sector_size
+ part.extent = Extent(start=1, length=fill_size)
+
+ self.extent.filled_sectors = 0
+ for part in part_list:
+ part.extent = self.extent.pack(part.extent)
+
+ # Find the next unused partition number if not assigned
+ if hasattr(part, 'number'):
+ num = part.number
+ else:
+ for n in range(1, self.device.max_allowed_partitions + 1):
+ if n not in used_numbers and n not in requested_numbers:
+ num = n
+ break
+
+ part.number = num
+ used_numbers.add(num)
+
+ self.__cached_list_hash = current_list_hash
+ self.__cached_list = part_list
+ return part_list
+
+ def get_length_sectors(self, size_bytes):
+ """Get a length in sectors, aligned to 4096 byte boundaries"""
+ return (int(size_bytes) / self.device.sector_size +
+ ((int(size_bytes) % 4096) != 0) *
+ (4096 / self.device.sector_size))
+
+ def __str__(self):
+ string = ''
+ for part in self:
+ string = '%s\n%s\n' % (part, string)
+ return string.rstrip()
+
+ def __len__(self):
+ return len(self.__partition_list)
+
+ def __setitem__(self, i, value):
+ """Update the ith item in the list"""
+        self.append(value)
+
+
+class Partition(object):
+ """
+ A class to describe a partition in a disk or image
+
+ The required attributes are loaded via kwargs.
+
+ Required attributes:
+ size: String describing the size of the partition in bytes
+ This may also be 'fill' to indicate that this partition should
+ be expanded to fill all unused space. Where there is more than
+ one fill partition, unused space is divided equally between the
+ fill partitions.
+ fdisk_type: An integer representing the hexadecimal code used by fdisk
+ to describe the partition type. Any partitions with
+ fdisk_type='none' create an area of unused space.
+
+ Optional attributes:
+ **kwargs: A mapping of any keyword arguments
+ filesystem: A string describing the filesystem format for the
+ partition, or 'none' to skip filesystem creation.
+ description: A string describing the partition, for documentation
+ boot: Boolean string describing whether to set the bootable flag
+ mountpoint: String describing the mountpoint for the partition
+ number: Number used to override partition numbering for the
+ partition (Possible only when using an MBR partition table)
+ """
+ def __init__(self, size=0, fdisk_type=0x81, filesystem='none', **kwargs):
+ if not size and 'size' not in kwargs:
+ raise PartitioningError('Partition must have a non-zero size')
+
+ self.filesystem = filesystem
+ self.fdisk_type = fdisk_type
+
+ self.size = human_size(size)
+ self.__dict__.update(**kwargs)
+
+ def check(self):
+ """Check for correctness"""
+ if self.fdisk_type == 'none':
+ if self.filesystem != 'none':
+ raise PartitioningError('Partition: Free space '
+ 'cannot have a filesystem')
+ if hasattr(self, 'mountpoint') and self.mountpoint != 'none':
+ raise PartitioningError('Partition: Free space '
+ 'cannot have a mountpoint')
+
+ def compare(self, other):
+ """Check for mutually exclusive attributes"""
+ non_duplicable = ('number', 'mountpoint')
+ for attrib in non_duplicable:
+ if hasattr(self, attrib) and hasattr(other, attrib):
+ if getattr(self, attrib) == getattr(other, attrib):
+ return attrib
+ return False
+
+ def __str__(self):
+ string = ('Partition\n'
+ ' size: %s\n'
+ ' fdisk type: %s\n'
+ ' filesystem: %s'
+ % (self.size,
+ hex(self.fdisk_type) if self.fdisk_type != 'none'
+ else 'none',
+ self.filesystem))
+ if hasattr(self, 'extent'):
+ string += (
+ '\n start: %s'
+ '\n end: %s'
+ % (self.extent.start, self.extent.end))
+ if hasattr(self, 'number'):
+ string += '\n number: %s' % self.number
+ if hasattr(self, 'mountpoint'):
+ string += '\n mountpoint: %s' % self.mountpoint
+ if hasattr(self, 'boot'):
+ string += '\n bootable: %s' % self.boot
+
+ return string
+
+
+class Device(object):
+ """
+ A class to describe a disk or image, and its partition layout
+
+ Attributes are loaded from **kwargs, containing key-value pairs describing
+ the required attributes. This can be loaded from a YAML file, using the
+ module function load_yaml().
+
+ Required attributes:
+ location: The location of the device or disk image
+ size: A size in bytes describing the total amount of space the
+ partition table on the device will occupy, or 'fill' to
+ automatically fill the available space.
+
+ Optional attributes:
+ **kwargs: A mapping of any keyword arguments
+ start_offset: The first 512 byte sector of the first partition
+ (default: 2048)
+ partition_table_format: A string describing the type of partition
+ table used on the device (default: 'gpt')
+ partitions: A list of mappings for the attributes for each Partition
+ object. update_partitions() populates the partition list
+ based on the contents of this attribute.
+ """
+ min_start_bytes = 1024**2
+
+ def __init__(self, location, size, **kwargs):
+
+ if 'partition_table_format' not in kwargs:
+ self.partition_table_format = 'gpt'
+ if 'start_offset' not in kwargs:
+ self.start_offset = 2048
+
+ target_size = get_disk_size(location)
+ if str(size).lower() == 'fill':
+ self.size = target_size
+ else:
+ self.size = human_size(size)
+
+ if self.size > target_size:
+ raise PartitioningError('Not enough space available on target')
+
+ if self.size <= self.min_start_bytes:
+ raise PartitioningError('Device size must be greater than %d '
+ 'bytes' % self.min_start_bytes)
+
+ # Get sector size
+ self.sector_size = get_sector_size(location)
+ self.location = location
+
+ # Populate Device attributes from keyword args
+ self.__dict__.update(**kwargs)
+
+ if self.partition_table_format.lower() == 'gpt':
+ self.max_allowed_partitions = 128
+ else:
+ self.max_allowed_partitions = 4
+
+ # Process Device size
+ start = (self.start_offset * 512) / self.sector_size
+ # Sector quantities in the specification are assumed to be 512 bytes
+ # This converts to the real sector size
+ if (start * self.sector_size) < self.min_start_bytes:
+ raise PartitioningError('Start offset should be greater than '
+ '%d, for %d byte sectors' %
+                                    (self.min_start_bytes / self.sector_size,
+ self.sector_size))
+        # Check that the disk's first partition starts on a 4096 byte
+        # boundary; this ensures alignment, and avoids reduced performance
+        # on disks which use a 4096 byte physical sector size
+ if (start * self.sector_size) % 4096 != 0:
+ print('WARNING: Start sector is not aligned '
+ 'to 4096 byte sector boundaries')
+
+ # End sector is one sector less than the disk length
+ disk_end_sector = (self.size / self.sector_size) - 1
+ if self.partition_table_format == 'gpt':
+ # GPT partition table is duplicated at the end of the device.
+ # GPT header takes one sector, whatever the sector size,
+ # with a 16384 byte 'minimum' area for partition entries,
+ # supporting up to 128 partitions (128 bytes per entry).
+ # The duplicate GPT does not include the 'protective' MBR
+ gpt_size = ((16 * 1024) / self.sector_size) + 1
+ self.extent = Extent(start=start,
+ end=(disk_end_sector - gpt_size))
+ else:
+ self.extent = Extent(start=start, end=disk_end_sector)
+
+ self.update_partitions()
+
+ def update_partitions(self, partitions=None):
+ """
+ Reset list, populate with partitions from a list of attributes
+
+ Args:
+ partitions: A list of partition keyword attributes
+ """
+
+ self.partitionlist = PartitionList(self)
+ if partitions:
+ self.partitions = partitions
+ if hasattr(self, 'partitions'):
+ for partition_args in self.partitions:
+ self.add_partition(Partition(**partition_args))
+
+ def add_partition(self, partition):
+ """
+ Add a Partition object to the device's list of partitions
+
+ Args:
+ partition: a Partition class
+ """
+
+ if len(self.partitionlist) < self.max_allowed_partitions:
+ self.partitionlist.append(partition)
+ else:
+ raise PartitioningError('Exceeded maximum number of partitions '
+ 'for %s partition table (%d)' %
+ (self.partition_table_format.upper(),
+ self.max_allowed_partitions))
+
+ def get_partition_by_mountpoint(self, mountpoint):
+ """Return a Partition with a specified mountpoint"""
+
+ try:
+ return next(r for r in self.partitionlist
+ if hasattr(r, 'mountpoint')
+ and r.mountpoint == mountpoint)
+ except StopIteration:
+ return False
+
+ def commit(self):
+ """Write the partition table to the disk or image"""
+
+ pt_format = self.partition_table_format.lower()
+ print("Creating %s partition table on %s" %
+ (pt_format.upper(), self.location))
+
+ # Create a new partition table
+ if pt_format in ('mbr', 'dos'):
+ cmd = "o\n"
+ elif pt_format == 'gpt':
+ cmd = "g\n"
+ else:
+ raise PartitioningError('Unrecognised partition '
+ 'table type \'%s\'' % pt_format)
+
+ for partition in self.partitionlist:
+ # Create partitions
+ if str(partition.fdisk_type).lower() != 'none':
+ cmd += "n\n"
+ if pt_format in ('mbr', 'dos'):
+ cmd += "p\n"
+ cmd += (str(partition.number) + "\n"
+ "" + str(partition.extent.start) + "\n"
+ "" + str(partition.extent.end) + "\n")
+
+ # Set partition types
+ cmd += "t\n"
+ if partition.number > 1:
+ # fdisk does not ask for a partition
+ # number when setting the type of the
+ # first created partition
+ cmd += str(partition.number) + "\n"
+ cmd += str(hex(partition.fdisk_type)) + "\n"
+
+ # Set bootable flag
+ if hasattr(partition, 'boot') and pt_format == 'mbr':
+ if str(partition.boot).lower() in ('yes', 'true'):
+ cmd += "a\n"
+ if partition.number > 1:
+ cmd += str(partition.number) + "\n"
+
+ # Write changes
+ cmd += ("w\n"
+ "q\n")
+ p = subprocess.Popen(["fdisk", self.location],
+ stdin=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ output = p.communicate(cmd)
+
+ errors = output[1].split('\n')[1:-1]
+ if errors:
+ # Exception handling is done in this way since fdisk will not
+ # return a failure exit code if it finds problems with its input.
+ # Note that the message 'disk does not contain a valid partition
+ # table' is not an error, it's a status message printed to stderr
+ # when fdisk starts with a blank device.
+ raise FdiskError('"%s"' % ' '.join(str(x) for x in errors))
+
+ def get_partition_uuid(self, partition):
+ """Read a partition's UUID from disk (MBR or GPT)"""
+
+ return get_partition_uuid(self.location, partition.number,
+ self.partition_table_format)
+
+ def create_filesystems(self, skip=[]):
+ """Create filesystems on the disk or image
+
+ Args:
+ skip: An iterable of mountpoints identifying partitions to skip
+ filesystem creation on, for example if custom settings are
+ required
+ """
+
+ for part in self.partitionlist:
+ if hasattr(part, 'mountpoint') and part.mountpoint in skip:
+ continue
+ if part.filesystem.lower() != 'none':
+ with create_loopback(self.location,
+ part.extent.start * self.sector_size,
+ part.size) as loop:
+ print ('Creating %s filesystem on partition %s' %
+ (part.filesystem, part.number))
+ subprocess.check_output(['mkfs.' + part.filesystem, loop])
+
+ def __str__(self):
+ return ('<Device: location=%s, size=%s, partitions: %s>' %
+ (self.location, self.size, len(self.partitionlist)))
+
+
+class PartitioningError(Exception):
+
+ def __init__(self, msg=None):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg
+
+
+class FdiskError(Exception):
+
+ def __init__(self, msg=None):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg
+
+
+def load_yaml(location, size, yaml_file):
+ """
+ Load partition data from a yaml specification
+
+ The YAML file describes the attributes documented in the Device
+ and Partition classes.
+
+ Args:
+ yaml_file: String path to a YAML file to load
+ location: Path to the device node or image to use for partitioning
+ size: The desired device size in bytes (may be 'fill' to occupy the
+ entire device
+
+ Returns:
+ A Device object
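+
+    Example (illustrative, assuming a 'partitions.yaml' specification):
+        >>> dev = load_yaml('test.img', 'fill', 'partitions.yaml')
+        >>> dev.commit()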
+ """
+
+ with open(yaml_file, 'r') as f:
+ kwargs = yaml.safe_load(f)
+ return Device(location, size, **kwargs)
+
+
+def get_sector_size(location):
+ """Get the logical sector size of a block device or image, in bytes"""
+
+ return int(__filter_fdisk_list_output('Sector size.*?(\d+) bytes',
+ location)[0])
+
+def get_disk_size(location):
+ """Get the total size of a block device or image, in bytes"""
+
+ return int(__filter_fdisk_list_output('Disk.*?(\d+) bytes',
+ location)[0])
+
+def get_partition_offsets(location):
+ """Return an array of the partition start sectors in a device or image"""
+
+ return __get_fdisk_list_numeric_column(location, 1)
+
+def get_partition_sector_sizes(location):
+ """Return an array of sizes of partitions in a device or image in sectors"""
+
+ return __get_fdisk_list_numeric_column(location, 3)
+
+def __get_fdisk_list_numeric_column(location, column):
+ return map(int, __filter_fdisk_list_output('%s(?:\d+[\*\s]+){%d}(\d+)' %
+ (location, column), location))
+
+def __filter_fdisk_list_output(regex, location):
+ r = re.compile(regex, re.DOTALL)
+ m = re.findall(r, subprocess.check_output(['fdisk', '-l', location]))
+ if m:
+ return m
+ else:
+ raise PartitioningError('Error reading information from fdisk')
+
+def human_size(size_string):
+ """Parse strings for human readable size factors"""
+
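+    # e.g. human_size('1M') -> 1048576, human_size('2k') -> 2048; strings
+    # with no recognised size suffix (such as 'fill') are returned unchanged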
+ facts_of_1024 = ['', 'k', 'm', 'g', 't']
+ m = re.match('^(\d+)([kmgtKMGT]?)$', str(size_string))
+ if not m:
+ return size_string
+ return int(m.group(1)) * (1024 ** facts_of_1024.index(m.group(2).lower()))
+
+@contextlib.contextmanager
+def create_loopback(mount_path, offset=0, size=0):
+ """
+ Create a loopback device for accessing partitions in block devices
+
+ Args:
+ mount_path: String path to mount
+ offset: Offset of the start of a partition in bytes (default 0)
+ size: Limits the size of the partition, in bytes (default 0). This is
+ important when creating filesystems, otherwise tools often
+ corrupt areas beyond the desired limits of the partition.
+ Returns:
+ The path to a created loopback device node
+ """
+
+ try:
+ base_args = ['losetup', '--show', '-f', '-P', '-o', str(offset)]
+ if size and offset:
+ cmd = base_args + ['--sizelimit', str(size), mount_path]
+ else:
+ cmd = base_args + [mount_path]
+ loop_device = subprocess.check_output(cmd).rstrip()
+        # Allow the system time to see the new device. On some systems,
+        # mounts created on the loop device too soon after creating the
+        # loopback device may be unreliable, even though the -P option
+        # (--partscan) is passed to losetup
+ time.sleep(1)
+ except subprocess.CalledProcessError:
+        raise PartitioningError('Error creating loopback')
+ try:
+ yield loop_device
+ finally:
+ subprocess.check_call(['losetup', '-d', loop_device])
+
+def get_pt_type(location):
+ """Read the partition table type from location (device or image)"""
+
+ pt_type = __get_blkid_output('PTTYPE', location).lower()
+ return 'none' if pt_type == '' else pt_type
+
+def __get_blkid_output(field, location):
+ return subprocess.check_output(['blkid', '-p', '-o', 'value',
+ '-s', field, location]).rstrip()
+
+def get_partition_uuid(location, part_num, pt_type=None):
+ """
+ Read the partition UUID (MBR or GPT) for location (device or image)
+
+ Args:
+ location: Path to device or image
+ part_num: Integer number of the partition
+ pt_type: The partition table format (MBR or GPT)
+ """
+
+ if not pt_type:
+ pt_type = get_pt_type(location)
+ if pt_type == 'gpt':
+ return get_partition_gpt_guid(location, part_num)
+ elif pt_type == 'mbr':
+ return get_partition_mbr_uuid(location, part_num)
+
+def get_partition_mbr_uuid(location, part_num):
+ """
+ Get a partition's UUID in a device using MBR partition table
+
+ In Linux, MBR partition UUIDs are comprised of the NT disk signature,
+ followed by '-' and a two digit, zero-padded partition number. This is
+ necessary since the MBR does not provide per-partition GUIDs as GPT
+ partition tables do. This can be passed to the kernel with
+ "root=PARTUUID=$UUID" to identify a partition containing a root
+ filesystem.
+
+ Args:
+ partition: A partition object
+ location: Location of the storage device containing the partition -
+ an image or device node
+ Returns:
+ A UUID referring to an MBR partition, e.g. '97478dab-02'
+ """
+
+ pt_uuid = __get_blkid_output('PTUUID', location).upper()
+ return '%s-%02d' % (pt_uuid, part_num)
+
+def get_partition_gpt_guid(location, part_num):
+ """
+ Get a partition's GUID from a GPT partition table
+
+ This is read directly from the partition table, since current fdisk does
+ not support reading GPT partition GUIDs. It does not require special tools
+ (gfdisk). This is the GUID which identifies the partition, created with
+ the partition table, as opposed to the filesystem UUID, created with the
+ filesystem. It is particularly useful for specifying the partition which
+ the Linux kernel can use on boot to find the root filesystem, e.g. when
+ using the kernel command line "root=PARTUUID=$UUID"
+
+ Args:
+ part_num: The partition number
+ location: Location of the storage device containing the partition -
+ an image path or device node
+ Returns:
+ A GUID string, e.g. 'B342D1AB-4B65-4601-97DC-D6DF3FE2E95E'
+ """
+
+ sector_size = get_sector_size(location)
+ # The partition GUID is located two sectors (protective MBR + GPT header)
+ # plus 128 bytes for each partition entry in the table, plus 16 bytes for
+ # the location of the partition's GUID
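+    # (e.g. for 512 byte sectors and partition 1, the GUID is read from
+    # byte offset (2 * 512) + (128 * 0) + 16 = 1040)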
+ guid_offset = (2 * sector_size) + (128 * (part_num - 1)) + 16
+
+ with open(location, 'rb') as f:
+ f.seek(guid_offset)
+ raw_uuid_bin = f.read(16)
+
+ a = ''
+ for c in raw_uuid_bin:
+ a += '%02X' % ord(c)
+
+ return ('%s%s%s%s-%s%s-%s%s-%s-%s' %
+ (a[6:8], a[4:6], a[2:4], a[0:2],
+ a[10:12], a[8:10],
+ a[14:16], a[12:14],
+ a[16:20], a[20:32]))
diff --git a/old/extensions/rawdisk.check b/old/extensions/rawdisk.check
new file mode 100755
index 00000000..e7aed390
--- /dev/null
+++ b/old/extensions/rawdisk.check
@@ -0,0 +1,52 @@
+#!/usr/bin/python2
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''Preparatory checks for Morph 'rawdisk' write extension'''
+
+import os
+
+import writeexts
+
+
+class RawdiskCheckExtension(writeexts.WriteExtension):
+ def process_args(self, args):
+ if len(args) != 1:
+ raise writeexts.ExtensionError(
+ 'Wrong number of command line args')
+
+ self.require_btrfs_in_deployment_host_kernel()
+
+ location = args[0]
+ upgrade = self.get_environment_boolean('UPGRADE')
+ if upgrade:
+ if not self.is_device(location):
+ if not os.path.isfile(location):
+ raise writeexts.ExtensionError(
+ 'Cannot upgrade %s: it is not an existing disk image' %
+ location)
+
+ version_label = os.environ.get('VERSION_LABEL')
+ if version_label is None:
+ raise writeexts.ExtensionError(
+ 'VERSION_LABEL was not given. It is required when '
+ 'upgrading an existing system.')
+ else:
+ if not self.is_device(location):
+ if os.path.exists(location):
+ raise writeexts.ExtensionError(
+ 'Target %s already exists. Use `morph upgrade` if you '
+ 'want to update an existing image.' % location)
+
+RawdiskCheckExtension().run()
diff --git a/old/extensions/rawdisk.write b/old/extensions/rawdisk.write
new file mode 100755
index 00000000..ad81ca45
--- /dev/null
+++ b/old/extensions/rawdisk.write
@@ -0,0 +1,122 @@
+#!/usr/bin/python2
+# Copyright (C) 2012-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''A Morph deployment write extension for raw disk images.'''
+
+
+import contextlib
+import os
+import pyfdisk
+import re
+import subprocess
+import sys
+import time
+import tempfile
+
+import writeexts
+
+
+class RawDiskWriteExtension(writeexts.WriteExtension):
+
+ '''See rawdisk.write.help for documentation'''
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise writeexts.ExtensionError(
+ 'Wrong number of command line args')
+
+ temp_root, location = args
+ upgrade = self.get_environment_boolean('UPGRADE')
+
+ if upgrade:
+ self.upgrade_local_system(location, temp_root)
+ else:
+ try:
+ if not self.is_device(location):
+ with self.created_disk_image(location):
+ self.create_baserock_system(temp_root, location)
+ self.status(msg='Disk image has been created at %s' %
+ location)
+ else:
+ self.create_baserock_system(temp_root, location)
+ self.status(msg='System deployed to %s' % location)
+ except Exception:
+ self.status(msg='Failure to deploy system to %s' %
+ location)
+ raise
+
+ def upgrade_local_system(self, location, temp_root):
+ self.complete_fstab_for_btrfs_layout(temp_root)
+
+ try:
+ with self.mount(location) as mp:
+ self.do_upgrade(mp, temp_root)
+ return
+ except subprocess.CalledProcessError:
+ pass
+
+ # At this point, we have failed to mount a raw image, so instead
+ # search for a Baserock root filesystem in the device's partitions
+ with self.find_and_mount_rootfs(location) as mp:
+ self.do_upgrade(mp, temp_root)
+
+ def do_upgrade(self, mp, temp_root):
+ version_label = self.get_version_label(mp)
+ self.status(msg='Updating image to a new version with label %s' %
+ version_label)
+
+ version_root = os.path.join(mp, 'systems', version_label)
+ os.mkdir(version_root)
+
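+        # Snapshot the current default system's 'orig' subvolume as the
+        # basis of the new version, then rsync the newly built rootfs over
+        # it so that only changed files need to be transferred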
+ old_orig = os.path.join(mp, 'systems', 'default', 'orig')
+ new_orig = os.path.join(version_root, 'orig')
+ subprocess.check_call(
+ ['btrfs', 'subvolume', 'snapshot', old_orig, new_orig])
+
+ subprocess.check_call(
+ ['rsync', '-a', '--checksum', '--numeric-ids', '--delete',
+ temp_root + os.path.sep, new_orig])
+
+ self.create_run(version_root)
+
+ default_path = os.path.join(mp, 'systems', 'default')
+ if os.path.exists(default_path):
+ os.remove(default_path)
+ else:
+            # we are upgrading an old system that does
+ # not have an updated extlinux config file
+ if self.bootloader_config_is_wanted():
+ self.generate_bootloader_config(mp)
+ self.install_bootloader(mp)
+ os.symlink(version_label, default_path)
+
+ if self.bootloader_config_is_wanted():
+ self.install_kernel(version_root, temp_root)
+
+ def get_version_label(self, mp):
+ version_label = os.environ.get('VERSION_LABEL')
+
+ if version_label is None:
+ raise writeexts.ExtensionError('VERSION_LABEL was not given')
+
+ if os.path.exists(os.path.join(mp, 'systems', version_label)):
+ raise writeexts.ExtensionError('VERSION_LABEL %s already exists'
+ % version_label)
+
+ return version_label
+
+
+RawDiskWriteExtension().run()
diff --git a/old/extensions/rawdisk.write.help b/old/extensions/rawdisk.write.help
new file mode 100644
index 00000000..72e285b7
--- /dev/null
+++ b/old/extensions/rawdisk.write.help
@@ -0,0 +1,127 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+ Write a system produced by Morph to a physical disk, or to a file that can
+ be used as a virtual disk. The target will be formatted as a single Btrfs
+ partition, with the system image written to a subvolume in /systems, and
+ other subvolumes created for /home, /opt, /root, /srv and /var.
+
+ When written to a physical drive, the drive can be used as the boot device
+ for a 'real' machine.
+
+ When written to a file, the file can be used independently of `morph` to
+ create virtual machines with KVM / libvirt, OpenStack or, after converting
+ it to VDI format, VirtualBox.
+
+ `morph deploy` will fail if the file specified by `location` already
+ exists.
+
+ If used in `morph upgrade`, the rootfs produced by 'morph build' is added
+ to the existing raw disk image or device as an additional btrfs sub-volume.
+ `morph upgrade` will fail if the file specified by `location` does not
+ exist, or is not a Baserock raw disk image. (Most users are unlikely to
+ need or use this functionality: it is useful mainly for developers working
+ on the Baserock tools.)
+
+ Parameters:
+
+ * location: the pathname of the disk image to be created/upgraded, or the
+ path to the physical device.
+
+ * VERSION_LABEL=label - should contain only alpha-numeric
+ characters and the '-' (hyphen) character. Mandatory if being used with
+      `morph upgrade`
+
+ * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to
+ tell Linux to use, rather than booting the rootfs directly.
+
+ * DTB_PATH=path: **(MANDATORY)** for systems that require a device tree
+ binary - Give the full path (without a leading /) to the location of the
+      DTB in the built system image. The deployment will fail if `path` does
+ not exist.
+
+ * BOOTLOADER_INSTALL=value: the bootloader to be installed
+ **(MANDATORY)** for non-x86 systems
+
+ allowed values =
+ - 'extlinux' (default) - the extlinux bootloader will
+ be installed
+ - 'none' - no bootloader will be installed by `morph deploy`. A
+ bootloader must be installed manually. This value must be used when
+ deploying non-x86 systems such as ARM.
+
+ * BOOTLOADER_CONFIG_FORMAT=value: the bootloader format to be used.
+ If not specified for x86-32 and x86-64 systems, 'extlinux' will be used
+
+ allowed values =
+ - 'extlinux'
+
+ * KERNEL_ARGS=args: optional additional kernel command-line parameters to
+ be appended to the default set. The default set is:
+
+ 'rw init=/sbin/init rootfstype=btrfs \
+ rootflags=subvol=systems/default/run \
+ root=[name or UUID of root filesystem]'
+
+ (See https://www.kernel.org/doc/Documentation/kernel-parameters.txt)
+
+ * PARTITION_FILE=path: path to a YAML partition specification to use for
+ producing partitioned disks or devices. The default specification is
+ 'partitioning/default' in definitions, which specifies a device with a
+ single partition. This may serve as an example of the format of this
+      file; see also the pyfdisk.py documentation in pyfdisk.README.
+
+      In addition to the features available in pyfdisk.py, this extension
+      allows a list of 'raw_files' items to be added at the partition level,
+      or at the top level of the partition specification. This specifies
+      files to be written directly to the target device or image using `dd`,
+      for example:
+
+ start_offset: 2048
+ partition_table_format: mbr
+ partitions:
+ - description: boot
+ filesystem: none
+ ...
+ raw_files:
+ - file: boot/uboot.img
+ raw_files:
+ - file: boot/uboot-env.img
+ offset_bytes: 512
+ - file: boot/preloader.bin
+ skip_bytes: 128
+ count_bytes: 16K
+
+ * Files are written consecutively in the order they are listed, and
+ sourced from the unpacked root filesystem image
+ * Files can be given a specific offset with 'offset_sectors' or
+ 'offset_bytes'
+ * With 'raw_files' specified inside a partition, 'offset_sectors' or
+ 'offset_bytes' is counted from the start of that partition,
+ otherwise from the start of the device.
+ * For files without an explicit offset, the next file is written
+ starting with the next free byte following the previous file
+ * Providing an offset is optional for all files
+ * Specifying 'skip_bytes' will set the 'skip=' option for dd, skipping
+ a number of bytes at the start of the input file
+ * Specifying 'count_bytes' sets the 'count=' option for dd
+ * For properties which take an input in bytes, a human-readable
+ multiplier can be used, e.g. K, M, G (integer multiplicands only)
+
+    * USE_PARTITIONING=boolean: (default: no) use this flag to enable
+ partitioning functions.
+
+ (See `morph help deploy` for details of how to pass parameters to write
+ extensions)
diff --git a/old/extensions/recv-hole b/old/extensions/recv-hole
new file mode 100755
index 00000000..fe69f304
--- /dev/null
+++ b/old/extensions/recv-hole
@@ -0,0 +1,158 @@
+#!/bin/sh
+#
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# =*= License: GPL-2 =*=
+
+
+# Receive a data stream describing a sparse file, and reproduce it,
+# either to a named file or stdout.
+#
+# The data stream is simple: it's a sequence of DATA or HOLE records:
+#
+# DATA
+# 123
+# <123 bytes of binary data, NOT including newline at the end>
+#
+# HOLE
+# 123
+#
+# This shell script can be executed over ssh (given to ssh as an argument,
+# with suitable escaping) on a different computer. This allows a large
+# sparse file (e.g., a disk image) to be transferred quickly.
+#
+# This script should be called in one of the following ways:
+#
+# recv-hole file FILENAME
+# recv-hole vbox FILENAME DISKSIZE
+#
+# In both cases, FILENAME is the pathname of the disk image on the
+# receiving end. DISKSIZE is the size of the disk image in bytes. The
+# first form is used when transferring a disk image to become an
+# identical file on the receiving end.
+#
+# The second form is used when the disk image should be converted for
+# use by VirtualBox. In this case, we want to avoid writing a
+# temporary file on disk, and then calling the VirtualBox VBoxManage
+# tool to do the conversion, since that would involve large amounts of
+# unnecessary I/O and disk usage. Instead we pipe the file directly to
+# VBoxManage, avoiding those issues. The piping is done here in this
+# script, instead of in the caller, to make it easier to run things
+# over ssh.
+#
+# However, since it's not possible to seek in a Unix pipe, we have to
+# explicitly write the zeroes into the pipe. This is not
+# super-efficient, but the way to avoid that would be to avoid sending
+# a sparse file, and do the conversion to a VDI on the sending end.
+# That is out of scope for xfer-hole and recv-hole.
+
+
+set -eu
+
+
+die()
+{
+ echo "$@" 1>&2
+ exit 1
+}
+
+
+recv_hole_to_file()
+{
+ local n
+
+ read n
+ truncate --size "+$n" "$1"
+}
+
+
+recv_data_to_file()
+{
+ local n
+ read n
+
+ local blocksize=1048576
+ local blocks=$(($n / $blocksize))
+ local extra=$(($n % $blocksize))
+
+ xfer_data_to_stdout "$blocksize" "$blocks" >> "$1"
+ xfer_data_to_stdout 1 "$extra" >> "$1"
+}
+
+
+recv_hole_to_stdout()
+{
+ local n
+ read n
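+    # Synthesise a DATA record of $n zero bytes from /dev/zero and reuse
+    # recv_data_to_stdout, since a pipe cannot seek to skip over a hole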
+ (echo "$n"; cat /dev/zero) | recv_data_to_stdout
+}
+
+
+recv_data_to_stdout()
+{
+ local n
+ read n
+
+ local blocksize=1048576
+ local blocks=$(($n / $blocksize))
+ local extra=$(($n % $blocksize))
+
+ xfer_data_to_stdout "$blocksize" "$blocks"
+ xfer_data_to_stdout 1 "$extra"
+}
+
+
+xfer_data_to_stdout()
+{
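+    # Copy $2 blocks of $1 bytes each from stdin to stdout with dd;
+    # iflag=fullblock prevents short reads from the pipe truncating
+    # the transfer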
+ local log="$(mktemp)"
+ if ! dd "bs=$1" count="$2" iflag=fullblock status=noxfer 2> "$log"
+ then
+ cat "$log" 1>&2
+ rm -f "$log"
+ exit 1
+ else
+ rm -f "$log"
+ fi
+}
+
+
+type="$1"
+case "$type" in
+ file)
+ output="$2"
+ truncate --size=0 "$output"
+ while read what
+ do
+ case "$what" in
+ DATA) recv_data_to_file "$output" ;;
+ HOLE) recv_hole_to_file "$output" ;;
+ *) die "Unknown instruction: $what" ;;
+ esac
+ done
+ ;;
+ vbox)
+ output="$2"
+ disk_size="$3"
+ while read what
+ do
+ case "$what" in
+ DATA) recv_data_to_stdout ;;
+ HOLE) recv_hole_to_stdout ;;
+ *) die "Unknown instruction: $what" ;;
+ esac
+ done |
+ VBoxManage convertfromraw stdin "$output" "$disk_size"
+ ;;
+esac
diff --git a/old/extensions/sdk.write b/old/extensions/sdk.write
new file mode 100755
index 00000000..8d3d2a63
--- /dev/null
+++ b/old/extensions/sdk.write
@@ -0,0 +1,284 @@
+#!/bin/sh
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# =*= License: GPL-2 =*=
+
+set -eu
+
+die(){
+ echo "$@" >&2
+ exit 1
+}
+
+shellescape(){
+ echo "'$(echo "$1" | sed -e "s/'/'\\''/g")'"
+}
+
+########################## END OF COMMON HEADER ###############################
+#
+# The above lines, as well as being part of this script, are copied into the
+# self-installing SDK blob's header script, as a means of re-using content.
+#
+
+help(){
+ cat <<EOF
+sdk.write: Write extension for making an SDK installer.
+
+Description:
+ This is a write extension for producing a self-installing SDK blob
+ from a configured system.
+
+ It generates a shell script header and appends the rootfs as a tarball,
+ which the header later extracts, and performs various configuration
+    steps to make it usable as a relocatable toolchain.
+
+ This is similar to what the shar and makeself programs do, but we
+    need custom setup, so shar isn't appropriate, and makeself's API is
+ insufficiently flexible for our requirements.
+
+ The toolchain relocation is handled by sedding every text file in the
+ SDK directory, and using the patchelf from inside the SDK to change
+ every ELF binary in the toolchain to use the linker and libraries from
+ inside the SDK.
+
+ The ELF patching is required so that the SDK can work independently
+ of the versions of libraries installed on the host system.
+
+Location: Path to create the script at
+
+ENV VARS:
+ PREFIX (optional) The prefix the toolchain is built with
+ defaults to /usr
+ TARGET (mandatory) The gnu triplet the toolchain was built with
+EOF
+}
+
+ROOTDIR="$1"
+OUTPUT_SCRIPT="$2"
+PREFIX=${PREFIX-/usr}
+
+find_patchelf(){
+ # Look for patchelf in the usual places
+ for binpath in /bin "$PREFIX/bin"; do
+ if [ -x "$ROOTDIR$binpath/patchelf" ]; then
+ echo "$binpath/patchelf"
+ return
+ fi
+ done
+ die "patchelf not found in rootfs"
+}
+
+read_elf_interpreter(){
+    # Use readelf and sed to find the interpreter a binary uses. This is
+ # required since we can't yet guarantee that the deploying system
+ # contains patchelf
+ readelf --wide --program-headers "$1" |
+ sed -nr -f /proc/self/fd/3 3<<'EOF'
+/\s+INTERP/{
+ n # linker is on line after INTERP line
+ s/^\s*\[Requesting program interpreter: (.*)]$/\1/
+ p # in -n mode, so need to print our text
+}
+EOF
+}
+
+find_lib_paths(){
+ local found_first=false
+ for libpath in "$PREFIX/lib32" "$PREFIX/lib64" "$PREFIX/lib" \
+ /lib32 /lib64 /lib; do
+ if [ -e "$ROOTDIR$libpath" ]; then
+ if "$found_first"; then
+ printf ":%s" "$libpath"
+ else
+ printf "%s" "$libpath"
+ found_first=true
+ fi
+ fi
+ done
+}
+
+# Create script with common header
+header_end=$(grep -En -m1 -e '^#+ END OF COMMON HEADER #+$' "$0" | cut -d: -f1)
+head -n "$header_end" "$0" | install -m 755 -D /dev/stdin "$OUTPUT_SCRIPT"
+
+# Determine any config
+PATCHELF="$(find_patchelf)"
+RTLD="$(read_elf_interpreter "$ROOTDIR$PATCHELF")"
+LIB_PATH="${LIB_PATH-$(find_lib_paths)}"
+
+# Append deploy-time config to header
+cat >>"$OUTPUT_SCRIPT" <<EOF
+#################### START OF DEPLOY TIME CONFIGURATION #######################
+
+TARGET=$(shellescape "$TARGET")
+PREFIX=$(shellescape "$PREFIX")
+PATCHELF=$(shellescape "$PATCHELF")
+RTLD=$(shellescape "$RTLD")
+LIB_PATH=$(shellescape "$LIB_PATH")
+
+##################### END OF DEPLOY TIME CONFIGURATION ########################
+EOF
+
+# Append deployment script
+cat >>"$OUTPUT_SCRIPT" <<'EOF'
+########################### START OF HEADER SCRIPT ############################
+
+usage(){
+ cat <<USAGE
+usage: $0 TOOLCHAIN_PATH
+USAGE
+}
+
+if [ "$#" != 1 ]; then
+ echo TOOLCHAIN_PATH not given >&2
+ usage >&2
+ exit 1
+fi
+
+TOOLCHAIN_PATH="$(readlink -f \"$1\")"
+
+sedescape(){
+ # Escape the passed in string so it can be safely interpolated into
+ # a sed expression as a literal value.
+ echo "$1" | sed -e 's/[\/&]/\\&/g'
+}
+
+prepend_to_path_elements(){
+ # Prepend $1 to every entry in the : separated list specified as $2.
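+    # e.g. prepend_to_path_elements /opt/sdk "usr/lib:lib" yields
+    # "/opt/sdk/usr/lib:/opt/sdk/lib"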
+ local prefix="$1"
+ (
+ # Split path into components
+ IFS=:
+ set -- $2
+ # Print path back out with new prefix
+ printf %s "$prefix/$1"
+ shift
+ for arg in "$@"; do
+ printf ":%s" "$prefix/$arg"
+ done
+ )
+}
+
+extract_rootfs(){
+ # Extract the bzipped tarball at the end of the script passed as $1
+ # to the path specified as $2
+ local selfextractor="$1"
+ local target="$2"
+ local script_end="$(($(\
+ grep -aEn -m1 -e '^#+ END OF HEADER SCRIPT #+$' "$selfextractor" |
+ cut -d: -f1) + 1 ))"
+ mkdir -p "$target"
+ tail -n +"$script_end" "$selfextractor" | tar -xj -C "$target" .
+}
+
+amend_text_file_paths(){
+ # Replace all instances of $3 with $4 in the directory specified by $1
+ # excluding the subdirectory $2
+ local root="$1"
+ local inner_sysroot="$2"
+ local old_prefix="$3"
+ local new_prefix="$4"
+ find "$root" \( -path "$inner_sysroot" -prune \) -o -type f \
+ -exec sh -c 'file "$1" | grep -q text' - {} \; \
+ -exec sed -i -e \
+ "s/$(sedescape "$old_prefix")/$(sedescape "$new_prefix")/g" {} +
+}
+
+filter_patchelf_errors(){
+ # Filter out warnings from patchelf that are acceptable
+ # The warning that it's making a file bigger is just noise
+ # The warning about not being an ELF executable just means we got a
+ # false positive from file that it was an ELF binary
+ # Failing to find .interp is because for convenience, we set the
+ # interpreter in the same command as setting the rpath, even though
+ # we give it both executables and libraries.
+ grep -v -e 'warning: working around a Linux kernel bug' \
+ -e 'not an ELF executable' \
+ -e 'cannot find section .interp'
+}
+
+patch_elves(){
+ # Set the interpreter and library paths of ELF binaries in $1,
+ # except for the $2 subdirectory, using the patchelf command in the
+ # toolchain specified as $3, so that it uses the linker specified
+ # as $4 as the interpreter, and the runtime path specified by $5.
+ #
+ # The patchelf inside the toolchain is used to ensure that it works
+ # independently of the availability of patchelf on the host.
+ #
+ # This is possible by invoking the linker directly and specifying
+ # --linker-path as the RPATH we want to set the binaries to use.
+ local root="$1"
+ local inner_sysroot="$2"
+ local patchelf="$3"
+ local linker="$4"
+ local lib_path="$5"
+ find "$root" \( -path "$inner_sysroot" -prune \) -o -type f \
+        -perm +u=x \
+ -exec sh -c 'file "$1" | grep -q "ELF"' - {} \; \
+ -exec "$linker" --library-path "$lib_path" \
+ "$patchelf" --set-interpreter "$linker" \
+ --set-rpath "$lib_path" {} \; 2>&1 \
+ | filter_patchelf_errors
+}
+
+generate_environment_setup(){
+ local target="$1"
+ install -m 644 -D /dev/stdin "$target" <<ENVSETUP
+export PATH=$(shellescape "$TOOLCHAIN_PATH/usr/bin"):"\$PATH"
+export TARGET_PREFIX=$(shellescape "$TARGET"-)
+export CC=$(shellescape "$TARGET-gcc")
+export CXX=$(shellescape "$TARGET-g++")
+export CPP=$(shellescape "$TARGET-gcc -E")
+export AS=$(shellescape "$TARGET-as")
+export LD=$(shellescape "$TARGET-ld")
+export STRIP=$(shellescape "$TARGET-strip")
+export RANLIB=$(shellescape "$TARGET-ranlib")
+export OBJCOPY=$(shellescape "$TARGET-objcopy")
+export OBJDUMP=$(shellescape "$TARGET-objdump")
+export AR=$(shellescape "$TARGET-ar")
+export NM=$(shellescape "$TARGET-nm")
+ENVSETUP
+}
+
+SYSROOT="$TOOLCHAIN_PATH$PREFIX/$TARGET/sys-root"
+PATCHELF="$TOOLCHAIN_PATH$PATCHELF"
+RTLD="$TOOLCHAIN_PATH$RTLD"
+OLD_PREFIX="$PREFIX"
+NEW_PREFIX="$TOOLCHAIN_PATH/$PREFIX"
+RPATH="$(prepend_to_path_elements "$TOOLCHAIN_PATH" "$LIB_PATH")"
+ENV_SETUP_FILE="$(dirname "$TOOLCHAIN_PATH")/environment-setup-$TARGET"
+
+echo Writing environment setup script to "$ENV_SETUP_FILE"
+generate_environment_setup "$ENV_SETUP_FILE"
+
+echo Extracting rootfs
+extract_rootfs "$0" "$TOOLCHAIN_PATH"
+
+echo "Relocating prefix references of $OLD_PREFIX to $NEW_PREFIX in" \
+ "the toolchain's textual config files."
+amend_text_file_paths "$TOOLCHAIN_PATH" "$SYSROOT" "$OLD_PREFIX" "$NEW_PREFIX"
+
+echo "Patching ELF binary files' interpreter and runtime library paths" \
+ "to refer to libraries within the toolchain"
+patch_elves "$TOOLCHAIN_PATH" "$SYSROOT" "$PATCHELF" "$RTLD" "$RPATH"
+
+exit
+############################ END OF HEADER SCRIPT #############################
+EOF
+
+# Append rootfs as tarball
+tar -C "$1" -cj >>"$OUTPUT_SCRIPT" .
diff --git a/old/extensions/set-hostname.configure b/old/extensions/set-hostname.configure
new file mode 100755
index 00000000..3c400563
--- /dev/null
+++ b/old/extensions/set-hostname.configure
@@ -0,0 +1,27 @@
+#!/bin/sh
+# Copyright (C) 2013,2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+# Set hostname on system from HOSTNAME.
+
+
+set -e
+
+if [ -n "$HOSTNAME" ]
+then
+ mkdir -p "$1/etc"
+ echo "$HOSTNAME" > "$1/etc/hostname"
+fi
+
diff --git a/old/extensions/simple-network.configure b/old/extensions/simple-network.configure
new file mode 100755
index 00000000..67f46bc4
--- /dev/null
+++ b/old/extensions/simple-network.configure
@@ -0,0 +1,296 @@
+#!/usr/bin/python2
+# Copyright (C) 2013,2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''A Morph deployment configuration extension to handle network configuration
+
+This extension prepares /etc/network/interfaces and networkd .network files
+in /etc/systemd/network/ with the interfaces specified during deployment.
+
+If no network configuration is provided, eth0 will be configured for DHCP
+with the hostname of the system in the case of /etc/network/interfaces.
+In the case of networkd, any interface whose name starts with 'e' will be
+configured for DHCP.
+'''
+
+
+import errno
+import os
+import sys
+
+import writeexts
+
+
+class SimpleNetworkError(writeexts.ExtensionError):
+ '''Errors associated with simple network setup'''
+ pass
+
+
+class SimpleNetworkConfigurationExtension(object):
+ '''Configure /etc/network/interfaces and generate networkd .network files
+
+ Reading NETWORK_CONFIG, this extension sets up /etc/network/interfaces
+ and .network files in /etc/systemd/network/.
+ '''
+
+ def run(self, args):
+ network_config = os.environ.get("NETWORK_CONFIG")
+
+ self.rename_networkd_chunk_file(args)
+
+ if network_config is None:
+ self.generate_default_network_config(args)
+ else:
+ self.status(msg="Processing NETWORK_CONFIG=%(nc)s",
+ nc=network_config)
+
+ stanzas = self.parse_network_stanzas(network_config)
+
+ self.generate_interfaces_file(args, stanzas)
+ self.generate_networkd_files(args, stanzas)
+
+ def rename_networkd_chunk_file(self, args):
+ """Rename the 10-dchp.network file generated in the systemd chunk
+
+ The systemd chunk will place something in 10-dhcp.network, which will
+ have higher precedence than anything added in this extension (we
+ start at 50-*).
+
+        We should check for that file and rename it rather than removing it,
+        in case the file is being used by the user.
+
+ Until both the following happen, we should continue to rename that
+ default config file:
+
+ 1. simple-network.configure is always run when systemd is included
+ 2. We've been building systems without systemd including that default
+ networkd config for long enough that nobody should be including
+ that config file.
+ """
+ file_path = os.path.join(args[0], "etc", "systemd", "network",
+ "10-dhcp.network")
+
+ if os.path.isfile(file_path):
+ try:
+ os.rename(file_path, file_path + ".morph")
+ self.status(msg="Renaming networkd file from systemd chunk: \
+ %(f)s to %(f)s.morph", f=file_path)
+ except OSError:
+ pass
+
+ def generate_default_network_config(self, args):
+ """Generate default network config: DHCP in all the interfaces"""
+
+ default_network_config_interfaces = "lo:loopback;" \
+ "eth0:dhcp,hostname=$(hostname)"
+ default_network_config_networkd = "e*:dhcp"
+
+ stanzas_interfaces = self.parse_network_stanzas(
+ default_network_config_interfaces)
+ stanzas_networkd = self.parse_network_stanzas(
+ default_network_config_networkd)
+
+ self.generate_interfaces_file(args, stanzas_interfaces)
+ self.generate_networkd_files(args, stanzas_networkd)
+
+ def generate_interfaces_file(self, args, stanzas):
+ """Generate /etc/network/interfaces file"""
+
+ iface_file = self.generate_iface_file(stanzas)
+
+ directory_path = os.path.join(args[0], "etc", "network")
+ self.make_sure_path_exists(directory_path)
+ file_path = os.path.join(directory_path, "interfaces")
+ with open(file_path, "w") as f:
+ f.write(iface_file)
+
+ def generate_iface_file(self, stanzas):
+ """Generate an interfaces file from the provided stanzas.
+
+ The interfaces will be sorted by name, with loopback sorted first.
+ """
+
+ def cmp_iface_names(a, b):
+ a = a['name']
+ b = b['name']
+ if a == "lo":
+ return -1
+ elif b == "lo":
+ return 1
+ else:
+ return cmp(a,b)
+
+ return "\n".join(self.generate_iface_stanza(stanza)
+ for stanza in sorted(stanzas, cmp=cmp_iface_names))
+
+ def generate_iface_stanza(self, stanza):
+ """Generate an interfaces stanza from the provided data."""
+
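+        # e.g. the stanza parsed from 'eth1:static,address=10.0.0.1'
+        # produces:
+        #
+        #   auto eth1
+        #   iface eth1 inet static
+        #    address 10.0.0.1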
+ name = stanza['name']
+ itype = stanza['type']
+ lines = ["auto %s" % name, "iface %s inet %s" % (name, itype)]
+ lines += [" %s %s" % elem for elem in stanza['args'].items()]
+ lines += [""]
+ return "\n".join(lines)
+
+ def generate_networkd_files(self, args, stanzas):
+ """Generate .network files"""
+
+ for i, stanza in enumerate(stanzas, 50):
+ iface_file = self.generate_networkd_file(stanza)
+
+ if iface_file is None:
+ continue
+
+ directory_path = os.path.join(args[0], "etc", "systemd", "network")
+ self.make_sure_path_exists(directory_path)
+ file_path = os.path.join(directory_path,
+ "%s-%s.network" % (i, stanza['name']))
+
+ with open(file_path, "w") as f:
+ f.write(iface_file)
+
+ def generate_networkd_file(self, stanza):
+ """Generate an .network file from the provided data."""
+
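+        # e.g. 'eth1:static,address=10.0.0.1,netmask=255.255.0.0' yields:
+        #
+        #   [Match]
+        #   Name=eth1
+        #
+        #   [Network]
+        #   Address=10.0.0.1/16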
+ name = stanza['name']
+ itype = stanza['type']
+ pairs = stanza['args'].items()
+
+ if itype == "loopback":
+ return
+
+ lines = ["[Match]"]
+ lines += ["Name=%s\n" % name]
+ lines += ["[Network]"]
+ if itype == "dhcp":
+ lines += ["DHCP=yes"]
+ else:
+ lines += self.generate_networkd_entries(pairs)
+
+ return "\n".join(lines)
+
+ def generate_networkd_entries(self, pairs):
+ """Generate networkd configuration entries with the other parameters"""
+
+ address = None
+ netmask = None
+ gateway = None
+ dns = None
+ lines = []
+
+ for pair in pairs:
+ if pair[0] == 'address':
+ address = pair[1]
+ elif pair[0] == 'netmask':
+ netmask = pair[1]
+ elif pair[0] == 'gateway':
+ gateway = pair[1]
+ elif pair[0] == 'dns':
+ dns = pair[1]
+
+ if address and netmask:
+            network_suffix = self.convert_net_mask_to_cidr_suffix(netmask)
+ address_line = address + '/' + str(network_suffix)
+ lines += ["Address=%s" % address_line]
+ elif address or netmask:
+ raise SimpleNetworkError(
+ 'address and netmask must be specified together')
+
+ if gateway:
+ lines += ["Gateway=%s" % gateway]
+
+ if dns:
+ lines += ["DNS=%s" % dns]
+
+ return lines
+
+ def convert_net_mask_to_cidr_suffix(self, mask):
+ """Convert dotted decimal form of a subnet mask to CIDR suffix notation
+
+ For example: 255.255.255.0 -> 24
+ """
+ return sum(bin(int(x)).count('1') for x in mask.split('.'))
+
+ def parse_network_stanzas(self, config):
+ """Parse a network config environment variable into stanzas.
+
+ Network config stanzas are semi-colon separated.
+ """
+
+ return [self.parse_network_stanza(s) for s in config.split(";")]
+
+ def parse_network_stanza(self, stanza):
+ """Parse a network config stanza into name, type and arguments.
+
+ Each stanza is of the form name:type[,arg=value]...
+
+ For example:
+ lo:loopback
+ eth0:dhcp
+ eth1:static,address=10.0.0.1,netmask=255.255.0.0
+ """
+ elements = stanza.split(",")
+ lead = elements.pop(0).split(":")
+ if len(lead) != 2:
+ raise SimpleNetworkError("Stanza '%s' is missing its type" %
+ stanza)
+ iface = lead[0]
+ iface_type = lead[1]
+
+ if iface_type not in ['loopback', 'static', 'dhcp']:
+ raise SimpleNetworkError("Stanza '%s' has unknown interface type"
+ " '%s'" % (stanza, iface_type))
+
+ argpairs = [element.split("=", 1) for element in elements]
+ output_stanza = { "name": iface,
+ "type": iface_type,
+ "args": {} }
+ for argpair in argpairs:
+ if len(argpair) != 2:
+ raise SimpleNetworkError("Stanza '%s' has bad argument '%r'"
+ % (stanza, argpair.pop(0)))
+ if argpair[0] in output_stanza["args"]:
+ raise SimpleNetworkError("Stanza '%s' has repeated argument"
+ " %s" % (stanza, argpair[0]))
+ output_stanza["args"][argpair[0]] = argpair[1]
+
+ return output_stanza
+
+ def make_sure_path_exists(self, path):
+ try:
+ os.makedirs(path)
+ except OSError as e:
+ if e.errno == errno.EEXIST and os.path.isdir(path):
+ pass
+ else:
+ raise SimpleNetworkError("Unable to create directory '%s'"
+ % path)
+
+ def status(self, **kwargs):
+ '''Provide status output.
+
+ The ``msg`` keyword argument is the actual message,
+ the rest are values for fields in the message as interpolated
+ by %.
+
+ '''
+
+ sys.stdout.write('%s\n' % (kwargs['msg'] % kwargs))
+
+try:
+ SimpleNetworkConfigurationExtension().run(sys.argv[1:])
+except SimpleNetworkError as e:
+ sys.stdout.write('ERROR: %s\n' % e)
+ sys.exit(1)
diff --git a/old/extensions/ssh-rsync.check b/old/extensions/ssh-rsync.check
new file mode 100755
index 00000000..5c2e5507
--- /dev/null
+++ b/old/extensions/ssh-rsync.check
@@ -0,0 +1,66 @@
+#!/usr/bin/python2
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''Preparatory checks for Morph 'ssh-rsync' write extension'''
+
+
+import os
+
+import writeexts
+
+
+class SshRsyncCheckExtension(writeexts.WriteExtension):
+ def process_args(self, args):
+ if len(args) != 1:
+ raise writeexts.ExtensionError(
+ 'Wrong number of command line args')
+
+ upgrade = self.get_environment_boolean('UPGRADE')
+ if not upgrade:
+ raise writeexts.ExtensionError(
+ 'The ssh-rsync write is for upgrading existing remote '
+ 'Baserock machines. It cannot be used for an initial '
+ 'deployment.')
+
+ if os.environ.get('VERSION_LABEL', '') == '':
+ raise writeexts.ExtensionError(
+ 'A VERSION_LABEL must be set when deploying an upgrade.')
+
+ location = args[0]
+ self.check_ssh_connectivity(location)
+ self.check_is_baserock_system(location)
+
+        # The new system that is being deployed as an upgrade must contain
+ # baserock-system-config-sync and system-version-manager. However, the
+ # old system simply needs to have SSH and rsync.
+ self.check_command_exists(location, 'rsync')
+
+ def check_is_baserock_system(self, location):
+ output = writeexts.ssh_runcmd(
+ location,
+ ['sh', '-c', 'test -d /baserock || echo -n dirnotfound'])
+ if output == 'dirnotfound':
+ raise writeexts.ExtensionError('%s is not a baserock system'
+ % location)
+
+ def check_command_exists(self, location, command):
+ test = 'type %s > /dev/null 2>&1 || echo -n cmdnotfound' % command
+ output = writeexts.ssh_runcmd(location, ['sh', '-c', test])
+ if output == 'cmdnotfound':
+ raise writeexts.ExtensionError(
+ "%s does not have %s" % (location, command))
+
+
+SshRsyncCheckExtension().run()
diff --git a/old/extensions/ssh-rsync.write b/old/extensions/ssh-rsync.write
new file mode 100755
index 00000000..1045c528
--- /dev/null
+++ b/old/extensions/ssh-rsync.write
@@ -0,0 +1,175 @@
+#!/usr/bin/python2
+# Copyright (C) 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''A Morph deployment write extension for upgrading systems over ssh.'''
+
+
+import contextlib
+import os
+import subprocess
+import sys
+import tempfile
+import time
+
+import writeexts
+
+
+def ssh_runcmd_ignore_failure(location, command, **kwargs):
+ try:
+ return writeexts.ssh_runcmd(location, command, **kwargs)
+ except writeexts.ExtensionError:
+ pass
+
+
+class SshRsyncWriteExtension(writeexts.WriteExtension):
+
+ '''See ssh-rsync.write.help for documentation'''
+
+
+ def find_root_disk(self, location):
+ '''Read /proc/mounts on location to find which device contains "/"'''
+
+ self.status(msg='Finding device that contains "/"')
+ contents = writeexts.ssh_runcmd(location,
+ ['cat', '/proc/mounts'])
+ for line in contents.splitlines():
+ line_words = line.split()
+ if (line_words[1] == '/' and line_words[0] != 'rootfs'):
+ return line_words[0]
+
+ @contextlib.contextmanager
+ def _remote_mount_point(self, location):
+ self.status(msg='Creating remote mount point')
+ remote_mnt = writeexts.ssh_runcmd(location,
+ ['mktemp', '-d']).strip()
+ try:
+ yield remote_mnt
+ finally:
+ self.status(msg='Removing remote mount point')
+ writeexts.ssh_runcmd(location, ['rmdir', remote_mnt])
+
+ @contextlib.contextmanager
+ def _remote_mount(self, location, root_disk, mountpoint):
+ self.status(msg='Mounting root disk')
+ writeexts.ssh_runcmd(location, ['mount', root_disk, mountpoint])
+ try:
+ yield
+ finally:
+ self.status(msg='Unmounting root disk')
+ writeexts.ssh_runcmd(location, ['umount', mountpoint])
+
+ @contextlib.contextmanager
+ def _created_version_root(self, location, remote_mnt, version_label):
+ version_root = os.path.join(remote_mnt, 'systems', version_label)
+ self.status(msg='Creating %(root)s', root=version_root)
+ writeexts.ssh_runcmd(location, ['mkdir', version_root])
+ try:
+ yield version_root
+ except BaseException as e:
+ # catch all, we always want to clean up
+ self.status(msg='Cleaning up %(root)s', root=version_root)
+ ssh_runcmd_ignore_failure(location, ['rmdir', version_root])
+ raise
+
+ def get_old_orig(self, location, remote_mnt):
+ '''Identify which subvolume to snapshot from'''
+
+ # rawdisk upgrades use 'default'
+ return os.path.join(remote_mnt, 'systems', 'default', 'orig')
+
+ @contextlib.contextmanager
+ def _created_orig_subvolume(self, location, remote_mnt, version_root):
+ self.status(msg='Creating "orig" subvolume')
+ old_orig = self.get_old_orig(location, remote_mnt)
+ new_orig = os.path.join(version_root, 'orig')
+ writeexts.ssh_runcmd(location, ['btrfs', 'subvolume', 'snapshot',
+ old_orig, new_orig])
+ try:
+ yield new_orig
+ except BaseException as e:
+ ssh_runcmd_ignore_failure(
+ location, ['btrfs', 'subvolume', 'delete', new_orig])
+ raise
+
+ def populate_remote_orig(self, location, new_orig, temp_root):
+ '''Populate the subvolume version_root/orig on location'''
+
+ self.status(msg='Populating "orig" subvolume')
+ subprocess.check_call(['rsync', '-as', '--checksum', '--numeric-ids',
+ '--delete', temp_root + os.path.sep,
+ '%s:%s' % (location, new_orig)])
+
+ @contextlib.contextmanager
+ def _deployed_version(self, location, version_label,
+ system_config_sync, system_version_manager):
+ self.status(msg='Calling system-version-manager to deploy upgrade')
+ deployment = os.path.join('/systems', version_label, 'orig')
+ writeexts.ssh_runcmd(location,
+ ['env', 'BASEROCK_SYSTEM_CONFIG_SYNC='+system_config_sync,
+ system_version_manager, 'deploy', deployment])
+ try:
+ yield deployment
+ except BaseException as e:
+ self.status(msg='Cleaning up failed version installation')
+ writeexts.ssh_runcmd(location,
+ [system_version_manager, 'remove', version_label])
+ raise
+
+ def upgrade_remote_system(self, location, temp_root):
+ root_disk = self.find_root_disk(location)
+ uuid = writeexts.ssh_runcmd(location,
+ ['blkid', '-s', 'UUID', '-o', 'value', root_disk]).strip()
+
+ self.complete_fstab_for_btrfs_layout(temp_root, uuid)
+
+ version_label = os.environ['VERSION_LABEL']
+ autostart = self.get_environment_boolean('AUTOSTART')
+
+ with self._remote_mount_point(location) as remote_mnt, \
+ self._remote_mount(location, root_disk, remote_mnt), \
+ self._created_version_root(location, remote_mnt,
+ version_label) as version_root, \
+ self._created_orig_subvolume(location, remote_mnt,
+ version_root) as orig:
+ self.populate_remote_orig(location, orig, temp_root)
+ system_root = os.path.join(remote_mnt, 'systems',
+ version_label, 'orig')
+ config_sync = os.path.join(system_root, 'usr', 'bin',
+ 'baserock-system-config-sync')
+ version_manager = os.path.join(system_root, 'usr', 'bin',
+ 'system-version-manager')
+ with self._deployed_version(location, version_label,
+ config_sync, version_manager):
+ self.status(msg='Setting %(v)s as the new default system',
+ v=version_label)
+ writeexts.ssh_runcmd(location,
+ [version_manager, 'set-default', version_label])
+
+ if autostart:
+ self.status(msg="Rebooting into new system ...")
+ ssh_runcmd_ignore_failure(location, ['reboot'])
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise writeexts.ExtensionError(
+ 'Wrong number of command line args')
+
+ temp_root, location = args
+
+ self.upgrade_remote_system(location, temp_root)
+
+
+SshRsyncWriteExtension().run()
diff --git a/old/extensions/ssh-rsync.write.help b/old/extensions/ssh-rsync.write.help
new file mode 100644
index 00000000..f3f79ed5
--- /dev/null
+++ b/old/extensions/ssh-rsync.write.help
@@ -0,0 +1,50 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+ Upgrade a Baserock system which is already deployed:
+ - as a KVM/LibVirt, OpenStack or vbox-ssh virtual machine;
+ - on a Jetson board.
+
+ Copies a binary delta over to the target system and arranges for it
+ to be bootable.
+
+ The recommended way to use this extension is by calling `morph upgrade`.
+ Using `morph deploy --upgrade` is deprecated.
+
+ The upgrade will fail if:
+ - no VM is deployed and running at `location`;
+ - the target system is not a Baserock system;
+ - the target's filesystem and its layout are not compatible with that
+      created by `morph deploy`.
+
+    See also the 'Upgrading a Baserock installation' section of the 'Using
+    Baserock' page at wiki.baserock.org
+ http://wiki.baserock.org/devel-with/#index8h2
+
+ Parameters:
+
+ * location: the 'user@hostname' string that will be used by ssh and rsync.
+ 'user' will always be `root` and `hostname` the hostname or address of
+ the system being upgraded.
+
+ * VERSION_LABEL=label - **(MANDATORY)** should contain only alpha-numeric
+ characters and the '-' (hyphen) character.
+
+    * AUTOSTART=<VALUE> - boolean. If it is set, the VM will be started when
+ it has been deployed.
+
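+  Example (illustrative values only, not taken from a real deployment):
+
+      location: root@192.168.122.100
+      VERSION_LABEL: factory-2015-06-01
+      AUTOSTART: yes
+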
+ (See `morph help deploy` for details of how to pass parameters to write
+ extensions)
diff --git a/old/extensions/sshkeys.configure b/old/extensions/sshkeys.configure
new file mode 100755
index 00000000..7a5a8379
--- /dev/null
+++ b/old/extensions/sshkeys.configure
@@ -0,0 +1,25 @@
+#!/bin/sh
+#
+# Copyright 2014 Codethink Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+if [ "$SSHKEYS" ]
+then
+ install -d -m 700 "$1/root/.ssh"
+    echo "Adding key(s) in $SSHKEYS to authorized_keys file"
+ cat $SSHKEYS >> "$1/root/.ssh/authorized_keys"
+fi
diff --git a/old/extensions/strip-gplv3.configure b/old/extensions/strip-gplv3.configure
new file mode 100755
index 00000000..e4e836f4
--- /dev/null
+++ b/old/extensions/strip-gplv3.configure
@@ -0,0 +1,97 @@
+#!/usr/bin/python2
+# Copyright (C) 2013-2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+''' A Morph configuration extension for removing gplv3 chunks from a system
+
+Using a hard-coded list of chunks, it will read the system's /baserock metadata
+to find the files created by that chunk, then remove them.
+
+'''
+
+import os
+import re
+import subprocess
+import sys
+
+import writeexts
+
+import imp
+scriptslib = imp.load_source('scriptslib', 'scripts/scriptslib.py')
+
+class StripGPLv3ConfigureExtension(writeexts.Extension):
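+    # Each entry below is [chunk-name, keep-pattern]: the files recorded in
+    # the chunk's /baserock metadata are removed unless they match
+    # keep-pattern. An empty pattern keeps nothing, so e.g. all of bash is
+    # removed, while the shared libraries of gcc and libtool are retained.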
+ gplv3_chunks = [
+ ['autoconf', ''],
+ ['autoconf-tarball', ''],
+ ['automake', ''],
+ ['bash', ''],
+ ['binutils', ''],
+ ['bison', ''],
+ ['ccache', ''],
+ ['cmake', ''],
+ ['flex', ''],
+ ['gawk', ''],
+ ['gcc', r'^.*lib.*\.so(\.\d+)*$'],
+ ['gdbm', ''],
+ ['gettext-tarball', ''],
+ ['gperf', ''],
+ ['groff', ''],
+ ['libtool', r'^.*lib.*\.so(\.\d+)*$'],
+ ['libtool-tarball', r'^.*lib.*\.so(\.\d+)*$'],
+ ['m4', ''],
+ ['make', ''],
+ ['nano', ''],
+ ['patch', ''],
+ ['rsync', ''],
+ ['texinfo-tarball', ''],
+ ]
+
+ def process_args(self, args):
+ target_root = args[0]
+ meta_dir = os.path.join(target_root, 'baserock')
+ metadata = scriptslib.meta_load_from_dir(meta_dir)
+
+ for chunk in self.gplv3_chunks:
+ for meta in metadata.get_name(chunk[0]):
+ self.remove_chunk(
+ target_root, reversed(meta['contents']), chunk[1])
+
+    def remove_chunk(self, target_root, chunk_contents, pattern):
+        pat = re.compile(pattern)
+        for content_entry in chunk_contents:
+            if len(pattern) == 0 or not pat.match(content_entry):
+                self.remove_content_entry(target_root, content_entry)
+
+ def remove_content_entry(self, target_root, content_entry):
+ entry_path = os.path.join(target_root, './' + content_entry)
+ if not entry_path.startswith(target_root):
+ raise writeexts.ExtensionError(
+ '%s is not in %s' % (entry_path, target_root))
+ if os.path.exists(entry_path):
+ if os.path.islink(entry_path):
+ os.unlink(entry_path)
+ elif os.path.isfile(entry_path):
+ os.remove(entry_path)
+ elif os.path.isdir(entry_path):
+ if not os.listdir(entry_path):
+ os.rmdir(entry_path)
+ else:
+ raise writeexts.ExtensionError(
+ '%s is not a link, file or directory' % entry_path)
+
+StripGPLv3ConfigureExtension().run(sys.argv[1:])
diff --git a/old/extensions/swift-build-rings.yml b/old/extensions/swift-build-rings.yml
new file mode 100644
index 00000000..1ffe9c37
--- /dev/null
+++ b/old/extensions/swift-build-rings.yml
@@ -0,0 +1,34 @@
+---
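+# Builds the Swift account, container and object rings with
+# swift-ring-builder. Assumes that SWIFT_PART_POWER, SWIFT_REPLICAS,
+# SWIFT_MIN_PART_HOURS, SWIFT_REBALANCE_SEED and SWIFT_STORAGE_DEVICES
+# are present in the environment, along with ROOT (validated and exported
+# by swift-storage.configure before it runs this playbook).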
+- hosts: localhost
+ vars:
+ - rings:
+ - { name: account, port: 6002 }
+ - { name: container, port: 6001 }
+ - { name: object, port: 6000 }
+ remote_user: root
+ tasks:
+ - file: path={{ ansible_env.ROOT }}/etc/swift owner=root group=root state=directory
+
+ - name: Create ring
+ shell: swift-ring-builder {{ item.name }}.builder create {{ ansible_env.SWIFT_PART_POWER }}
+ {{ ansible_env.SWIFT_REPLICAS }} {{ ansible_env.SWIFT_MIN_PART_HOURS }}
+ with_items: rings
+
+ - name: Add each storage node to the ring
+ shell: swift-ring-builder {{ item[0].name }}.builder
+ add r1z1-{{ item[1].ip }}:{{ item[0].port }}/{{ item[1].device }} {{ item[1].weight }}
+ with_nested:
+ - rings
+ - ansible_env.SWIFT_STORAGE_DEVICES
+
+ - name: Rebalance the ring
+ shell: swift-ring-builder {{ item.name }}.builder rebalance {{ ansible_env.SWIFT_REBALANCE_SEED }}
+ with_items: rings
+
+ - name: Copy ring configuration files into place
+ copy: src={{ item.name }}.ring.gz dest={{ ansible_env.ROOT }}/etc/swift
+ with_items: rings
+
+ - name: Copy ring builder files into place
+ copy: src={{ item.name }}.builder dest={{ ansible_env.ROOT }}/etc/swift
+ with_items: rings
diff --git a/old/extensions/swift-storage-devices-validate.py b/old/extensions/swift-storage-devices-validate.py
new file mode 100755
index 00000000..57ab23d0
--- /dev/null
+++ b/old/extensions/swift-storage-devices-validate.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright © 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# This is used by the openstack-swift.configure extension
+# to validate any provided storage device specifiers
+# under SWIFT_STORAGE_DEVICES
+#
+
+
+'''
+ This is used by the swift-storage.configure extension
+ to validate any storage device specifiers specified
+ in the SWIFT_STORAGE_DEVICES environment variable
+'''
+
+from __future__ import print_function
+
+import yaml
+import sys
+
+EXAMPLE_DEVSPEC = '{device: sdb1, ip: 127.0.0.1, weight: 100}'
+REQUIRED_KEYS = ['ip', 'device', 'weight']
+
+def err(msg):
+ print(msg, file=sys.stderr)
+ sys.exit(1)
+
+if len(sys.argv) != 2:
+ err('usage: %s STRING_TO_BE_VALIDATED' % sys.argv[0])
+
+swift_storage_devices = yaml.safe_load(sys.argv[1])
+
+if not isinstance(swift_storage_devices, list):
+ err('Expected list of device specifiers\n'
+ 'Example: [%s]' % EXAMPLE_DEVSPEC)
+
+for d in swift_storage_devices:
+ if not isinstance(d, dict):
+ err("Invalid device specifier: `%s'\n"
+ 'Device specifier must be a dictionary\n'
+ 'Example: %s' % (d, EXAMPLE_DEVSPEC))
+
+ if set(d.keys()) != set(REQUIRED_KEYS):
+ err("Invalid device specifier: `%s'\n"
+ 'Specifier should contain: %s\n'
+ 'Example: %s' % (d, str(REQUIRED_KEYS)[1:-1], EXAMPLE_DEVSPEC))
diff --git a/old/extensions/swift-storage.configure b/old/extensions/swift-storage.configure
new file mode 100644
index 00000000..391b392a
--- /dev/null
+++ b/old/extensions/swift-storage.configure
@@ -0,0 +1,107 @@
+#!/bin/bash
+#
+# Copyright © 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+# The ansible script needs to know where the rootfs is, so we export it here
+export ROOT="$1"
+
+validate_number() {
+ local name="$1"
+ local value="$2"
+
+ local pattern='^[0-9]+$'
+ if ! [[ $value =~ $pattern ]]
+ then
+ echo "'$name' must be a number" >&2
+ exit 1
+ fi
+}
+
+validate_non_empty() {
+ local name="$1"
+ local value="$2"
+
+ if [[ $value = None ]]
+ then
+ echo "'$name' cannot be empty" >&2
+ exit 1
+ fi
+}
+
+MANDATORY_OPTIONS="SWIFT_HASH_PATH_PREFIX \
+ SWIFT_HASH_PATH_SUFFIX \
+ SWIFT_REBALANCE_SEED \
+ SWIFT_PART_POWER \
+ SWIFT_REPLICAS \
+ SWIFT_MIN_PART_HOURS \
+ SWIFT_STORAGE_DEVICES \
+ CONTROLLER_HOST_ADDRESS \
+ MANAGEMENT_INTERFACE_IP_ADDRESS"
+
+for option in $MANDATORY_OPTIONS
+do
+ if ! [[ -v $option ]]
+ then
+ missing_option=True
+ echo "Required option $option isn't set!" >&2
+ fi
+done
+
+if [[ $missing_option = True ]]; then exit 1; fi
+
+./swift-storage-devices-validate.py "$SWIFT_STORAGE_DEVICES"
+
+# Validate SWIFT_PART_POWER, SWIFT_REPLICAS, SWIFT_MIN_PART_HOURS
+# just make sure they're numbers
+
+validate_number "SWIFT_PART_POWER" "$SWIFT_PART_POWER"
+validate_number "SWIFT_REPLICAS" "$SWIFT_REPLICAS"
+validate_number "SWIFT_MIN_PART_HOURS" "$SWIFT_MIN_PART_HOURS"
+
+# Make sure these aren't empty
+validate_non_empty "SWIFT_HASH_PATH_PREFIX" "$SWIFT_HASH_PATH_PREFIX"
+validate_non_empty "SWIFT_HASH_PATH_SUFFIX" "$SWIFT_HASH_PATH_SUFFIX"
+validate_non_empty "SWIFT_REBALANCE_SEED" "$SWIFT_REBALANCE_SEED"
+validate_non_empty "CONTROLLER_HOST_ADDRESS" "$CONTROLLER_HOST_ADDRESS"
+validate_non_empty "MANAGEMENT_INTERFACE_IP_ADDRESS" "$MANAGEMENT_INTERFACE_IP_ADDRESS"
+
+mkdir -p "$ROOT/usr/lib/systemd/system/multi-user.target.wants" # ensure this exists before we make symlinks
+
+# A swift controller needs the storage setup service
+# but does not want any of the other storage services enabled
+ln -s "/usr/lib/systemd/system/swift-storage-setup.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-storage-setup.service"
+
+SWIFT_CONTROLLER=${SWIFT_CONTROLLER:-False}
+
+if [[ $SWIFT_CONTROLLER = False ]]
+then
+ ln -s "/usr/lib/systemd/system/rsync.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/rsync.service"
+ ln -s "/usr/lib/systemd/system/swift-storage.service" \
+ "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-storage.service"
+fi
+
+# Build swift data structures (the rings)
+/usr/bin/ansible-playbook -i hosts swift-build-rings.yml
+
+cat << EOF > "$ROOT"/usr/share/swift/swift-storage-vars.yml
+---
+MANAGEMENT_INTERFACE_IP_ADDRESS: $MANAGEMENT_INTERFACE_IP_ADDRESS
+SWIFT_HASH_PATH_PREFIX: $SWIFT_HASH_PATH_PREFIX
+SWIFT_HASH_PATH_SUFFIX: $SWIFT_HASH_PATH_SUFFIX
+EOF
diff --git a/old/extensions/sysroot.check b/old/extensions/sysroot.check
new file mode 100755
index 00000000..71b35175
--- /dev/null
+++ b/old/extensions/sysroot.check
@@ -0,0 +1,23 @@
+#!/bin/sh
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Preparatory checks for Morph 'sysroot' write extension
+
+set -eu
+
+if [ "$UPGRADE" == "yes" ]; then
+ echo >&2 "ERROR: Cannot upgrade a sysroot deployment"
+ exit 1
+fi
diff --git a/old/extensions/sysroot.write b/old/extensions/sysroot.write
new file mode 100755
index 00000000..46f1a780
--- /dev/null
+++ b/old/extensions/sysroot.write
@@ -0,0 +1,22 @@
+#!/bin/sh
+# Copyright (C) 2014,2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# A Morph write extension to deploy to another directory
+
+set -eu
+
+mkdir -p "$2"
+
+cp -a "$1"/* "$2"
diff --git a/old/extensions/tar.check b/old/extensions/tar.check
new file mode 100755
index 00000000..ca5747fd
--- /dev/null
+++ b/old/extensions/tar.check
@@ -0,0 +1,23 @@
+#!/bin/sh
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Preparatory checks for Morph 'tar' write extension
+
+set -eu
+
+if [ "$UPGRADE" = "yes" ]; then
+ echo >&2 "ERROR: Cannot upgrade a tar file deployment."
+ exit 1
+fi
diff --git a/old/extensions/tar.write b/old/extensions/tar.write
new file mode 100755
index 00000000..01b545b4
--- /dev/null
+++ b/old/extensions/tar.write
@@ -0,0 +1,20 @@
+#!/bin/sh
+# Copyright (C) 2013,2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# A Morph write extension to deploy to a .tar file
+
+set -eu
+
+tar -C "$1" -cf "$2" .
diff --git a/old/extensions/tar.write.help b/old/extensions/tar.write.help
new file mode 100644
index 00000000..b45c61fa
--- /dev/null
+++ b/old/extensions/tar.write.help
@@ -0,0 +1,19 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+ Create a .tar file of the deployed system.
+
+ The `location` argument is a pathname to the .tar file to be
+ created.
diff --git a/old/extensions/trove.configure b/old/extensions/trove.configure
new file mode 100755
index 00000000..c1cd8a65
--- /dev/null
+++ b/old/extensions/trove.configure
@@ -0,0 +1,172 @@
+#!/bin/sh
+#
+# Copyright (C) 2013 - 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configuration extension to fully configure
+# a Trove instance at deployment time. It uses the following variables
+# from the environment (run `morph help trove.configure` to see a description
+# of them):
+#
+# * TROVE_ID
+# * TROVE_HOSTNAME (optional, defaults to TROVE_ID)
+# * TROVE_COMPANY
+# * LORRY_SSH_KEY
+# * WORKER_SSH_PUBKEY
+# * UPSTREAM_TROVE
+# * UPSTREAM_TROVE_PROTOCOL
+# * TROVE_ADMIN_USER
+# * TROVE_ADMIN_EMAIL
+# * TROVE_ADMIN_NAME
+# * TROVE_ADMIN_SSH_PUBKEY
+# * LORRY_CONTROLLER_MINIONS (optional, defaults to 4)
+# * TROVE_BACKUP_KEYS - a space-separated list of paths to SSH keys.
+# (optional)
+# * TROVE_GENERIC (optional)
+#
+# The configuration of a Trove is slightly tricky: part of it has to
+# be run on the configured system after it has booted. We accomplish
+# this by copying in all the relevant data to the target system
+# (in /var/lib/trove-setup), and creating a systemd unit file that
+# runs on the first boot. The first boot will be detected by the
+# existence of the /var/lib/trove-setup/needed file.
+
+set -e
+
+if [ "$TROVE_GENERIC" ]
+then
+ echo "Not configuring the trove, it will be generic"
+ exit 0
+fi
+
+
+# Check that all the variables needed are present:
+
+error_vars=false
+if test "x$TROVE_ID" = "x"; then
+ echo "ERROR: TROVE_ID needs to be defined."
+ error_vars=true
+fi
+
+if test "x$TROVE_COMPANY" = "x"; then
+ echo "ERROR: TROVE_COMPANY needs to be defined."
+ error_vars=true
+fi
+
+if test "x$TROVE_ADMIN_USER" = "x"; then
+ echo "ERROR: TROVE_ADMIN_USER needs to be defined."
+ error_vars=true
+fi
+
+if test "x$TROVE_ADMIN_NAME" = "x"; then
+ echo "ERROR: TROVE_ADMIN_NAME needs to be defined."
+ error_vars=true
+fi
+
+if test "x$TROVE_ADMIN_EMAIL" = "x"; then
+ echo "ERROR: TROVE_ADMIN_EMAIL needs to be defined."
+ error_vars=true
+fi
+
+if ! ssh-keygen -lf $LORRY_SSH_KEY > /dev/null 2>&1
+then
+    echo "ERROR: LORRY_SSH_KEY is not a valid ssh key."
+ error_vars=true
+fi
+
+if ! ssh-keygen -lf $WORKER_SSH_PUBKEY > /dev/null 2>&1
+then
+    echo "ERROR: WORKER_SSH_PUBKEY is not a valid ssh key."
+ error_vars=true
+fi
+
+if ! ssh-keygen -lf $TROVE_ADMIN_SSH_PUBKEY > /dev/null 2>&1
+then
+    echo "ERROR: TROVE_ADMIN_SSH_PUBKEY is not a valid ssh key."
+ error_vars=true
+fi
+
+if "$error_vars"; then
+ exit 1
+fi
+
+ROOT="$1"
+
+
+TROVE_DATA="$ROOT/etc/trove"
+mkdir -p "$TROVE_DATA"
+
+# Install mandatory files
+install -m 0600 "$LORRY_SSH_KEY" "$TROVE_DATA/lorry.key"
+install -m 0644 "${LORRY_SSH_KEY}.pub" "$TROVE_DATA/lorry.key.pub"
+install -m 0644 "$TROVE_ADMIN_SSH_PUBKEY" "$TROVE_DATA/admin.key.pub"
+install -m 0644 "$WORKER_SSH_PUBKEY" "$TROVE_DATA/worker.key.pub"
+
+
+# Create base configuration file
+python <<'EOF' >"$TROVE_DATA/trove.conf"
+import os, sys, yaml
+
+trove_configuration={
+ 'TROVE_ID': os.environ['TROVE_ID'],
+ 'TROVE_COMPANY': os.environ['TROVE_COMPANY'],
+ 'TROVE_ADMIN_USER': os.environ['TROVE_ADMIN_USER'],
+ 'TROVE_ADMIN_EMAIL': os.environ['TROVE_ADMIN_EMAIL'],
+ 'TROVE_ADMIN_NAME': os.environ['TROVE_ADMIN_NAME'],
+ 'LORRY_SSH_KEY': '/etc/trove/lorry.key',
+ 'LORRY_SSH_PUBKEY': '/etc/trove/lorry.key.pub',
+ 'TROVE_ADMIN_SSH_PUBKEY': '/etc/trove/admin.key.pub',
+ 'WORKER_SSH_PUBKEY': '/etc/trove/worker.key.pub',
+}
+
+
+
+optional_keys = ('MASON_ID', 'HOSTNAME', 'TROVE_HOSTNAME',
+ 'LORRY_CONTROLLER_MINIONS', 'TROVE_BACKUP_KEYS',
+ 'UPSTREAM_TROVE', 'UPSTREAM_TROVE_PROTOCOL')
+
+for key in optional_keys:
+ if key in os.environ:
+ trove_configuration[key]=os.environ[key]
+
+yaml.dump(trove_configuration, sys.stdout, default_flow_style=False)
+EOF
+
+# Add backups configuration
+if [ -n "$TROVE_BACKUP_KEYS" ]; then
+ mkdir -p "$TROVE_DATA/backup-keys"
+ cp -- $TROVE_BACKUP_KEYS "$TROVE_DATA/backup-keys"
+ echo "TROVE_BACKUP_KEYS: /etc/trove/backup-keys/*" >> "$TROVE_DATA/trove.conf"
+fi
+
+# Add SSL configuration
+if test "x$TROVE_SSL_PEMFILE" != "x"; then
+ if test -f "$TROVE_SSL_PEMFILE"; then
+ install -m 0600 "$TROVE_SSL_PEMFILE" "$TROVE_DATA/trove-ssl-pemfile.pem"
+ echo "TROVE_SSL_PEMFILE: /etc/trove/trove-ssl-pemfile.pem" >> "$TROVE_DATA/trove.conf"
+ else
+ echo "ERROR: $TROVE_SSL_PEMFILE (TROVE_SSL_PEMFILE) doesn't exist."
+ exit 1
+ fi
+fi
+
+if test "x$TROVE_SSL_CA_FILE" != "x"; then
+ if test -f "$TROVE_SSL_CA_FILE"; then
+ install -m 0644 "$TROVE_SSL_CA_FILE" "$TROVE_DATA/trove-ssl-ca-file.pem"
+ echo "TROVE_SSL_CA_FILE: /etc/trove/trove-ssl-ca-file.pem" >> "$TROVE_DATA/trove.conf"
+ else
+ echo "ERROR: $TROVE_SSL_CA_FILE (TROVE_SSL_CA_FILE) doesn't exist."
+ exit 1
+ fi
+fi
diff --git a/old/extensions/trove.configure.help b/old/extensions/trove.configure.help
new file mode 100644
index 00000000..2669f693
--- /dev/null
+++ b/old/extensions/trove.configure.help
@@ -0,0 +1,134 @@
+help: |
+ This is a "morph deploy" configuration extension to fully configure
+ a Trove instance at deployment time. It uses the following
+ configuration variables:
+
+ * `TROVE_ID`
+ * `TROVE_HOSTNAME` (optional, defaults to `TROVE_ID`)
+ * `TROVE_COMPANY`
+ * `LORRY_SSH_KEY`
+ * `UPSTREAM_TROVE`
+ * `TROVE_ADMIN_USER`
+ * `TROVE_ADMIN_EMAIL`
+ * `TROVE_ADMIN_NAME`
+ * `TROVE_ADMIN_SSH_PUBKEY`
+ * `LORRY_CONTROLLER_MINIONS` (optional, defaults to 4)
+ * `TROVE_BACKUP_KEYS` - a space-separated list of paths to SSH keys.
+ (optional)
+ * `TROVE_SSL_PEMFILE` (optional)
+ * `TROVE_SSL_CA_FILE` (optional)
+
+ The variables are described in more detail below.
+
+ A Trove deployment needs to know the following things:
+
+ * The Trove's ID and public name.
+ * The Trove's administrator name and access details.
+ * Private and public SSH keys for the Lorry user on the Trove.
+ * Which upstream Trove it should be set to mirror upon initial deploy.
+
+ These are specified with the configuration variables described in this
+ help.
+
+ * `TROVE_GENERIC` -- boolean. If it's true the trove will be generic
+ and it won't be configured with any of the other variables listed
+ here.
+
+ * `TROVE_ID` -- the identifier of the Trove. This separates it from
+ other Troves, and allows mirroring of Troves to happen without local
+ changes getting overwritten.
+
+ The Trove ID is used in several ways. Any local repositories (those not
+ mirrored from elsewhere) get created under a prefix that is the ID.
+ Thus, the local repositories on the `git.baserock.org` Trove, whose
+ Trove ID is `baserock`, are named
+ `baserock/baserock/definitions.git` and similar. The ID is used
+ there twice: first as a prefix and then as a "project name" within
+ that prefix. There can be more projects under the prefix. For
+ example, there is a `baserock/local-config/lorries.git` repository,
+ where `local-config` is a separate project from `baserock`. Projects
+ here are a concept for the Trove's git access control language.
+
+    The Trove ID is also used as the prefix for any branch and tag names
+    created locally for repositories that are not local. Thus, in the
+    `delta/linux.git` repository, any local branches would be called
+    something like `baserock/morph`, instead of just `morph`. The
+    Trove's git access control prevents normal users from pushing
+    branches and tags that do not have the Trove ID as the prefix.
+
+ * `TROVE_HOSTNAME` -- the public name of the Trove. This is an
+ optional setting, and defaults to `TROVE_ID`. The public name is
+ typically the domain name of the server (e.g., `git.baserock.org`),
+ but can also be an IP address. This setting is used when Trove needs
+ to generate URLs that point to itself, such as the `git://` and
+ `http://` URLs for each git repository that is viewed via the web
+ interface.
+
+ Note that this is _not_ the system hostname. That is set separately,
+ with the `HOSTNAME` configuration setting (see the
+ `set-hostname.configure` extension).
+
+ * `TROVE_COMPANY` -- a description of the organisation who own the
+ Trove. This is shown in various parts of the web interface of the
+ Trove. It is for descriptive purposes only.
+
+ * `LORRY_SSH_KEY` -- ssh key pair that the Trove's Lorry will use to
+ access an upstream Trove, and to push updates to the Trove's git
+ server.
+
+ The value is a filename on the system doing the deployment (where
+ `morph deploy` is run). The file contains the _private_ key, and the
+ public key is in a file with the `.pub` suffix added to the name.
+
+ The upstream Trove needs to be configured to allow this key to
+ access it. This configuration does not do that automatically.
+
+ * `UPSTREAM_TROVE` -- public name of the upstream Trove (domain
+ name or IP address). This is an optional setting. If it's set,
+ the new Trove will be configured to mirror that Trove.
+
+ * `TROVE_ADMIN_USER`, `TROVE_ADMIN_EMAIL`, `TROVE_ADMIN_NAME`,
+ `TROVE_ADMIN_SSH_PUBKEY` -- details of the Trove's (initial)
+ administrator.
+
+ Each Trove needs at least one administrator user, and one is created
+ upon initial deployment. `TROVE_ADMIN_USER` is the username of the
+ account to be created, `TROVE_ADMIN_EMAIL` should be the e-mail of
+ the user, and `TROVE_ADMIN_NAME` is their name. If more
+ administrators are needed, the initial person should create them
+ using the usual Gitano commands.
+
+ * `LORRY_CONTROLLER_MINIONS` -- the number of Lorry Controller worker
+ processes to start. This is an optional setting and defaults to 4.
+ The more workers are running, the more Lorry jobs can run at the same
+ time, but the more resources they require.
+
+ * `TROVE_BACKUP_KEYS` -- a space-separated list of paths to SSH keys.
+ If this is set, the Trove will have a backup user that can be accessed
+ with rsync using the SSH keys provided.
+
+ * `TROVE_SSL_PEMFILE` -- SSL certificate to use in lighttpd SSL
+ configuration.
+
+ * `TROVE_SSL_CA_FILE` -- CA chain certificate to use in lighttpd SSL
+ configuration.
+
+ Example
+ -------
+
+  The following set of variables could be used to deploy a Trove instance:
+
+ TROVE_ID: my-trove
+ TROVE_HOSTNAME: my-trove.example.com
+ TROVE_COMPANY: My Personal Trove for Me, Myself and I
+ LORRY_SSH_KEY: my-trove/lorry.key
+ UPSTREAM_TROVE: git.baserock.org
+ UPSTREAM_TROVE_USER: my-trove
+ UPSTREAM_TROVE_EMAIL: my-trove@example.com
+ TROVE_ADMIN_USER: tomjon
+ TROVE_ADMIN_EMAIL: tomjon@example.com
+ TROVE_ADMIN_NAME: Tomjon of Lancre
+ TROVE_ADMIN_SSH_PUBKEY: my-trove/tomjon.key.pub
+
+ These would be put into the cluster morphology used to do the
+ deployment.
diff --git a/old/extensions/vagrant.configure b/old/extensions/vagrant.configure
new file mode 100644
index 00000000..abc3ea0c
--- /dev/null
+++ b/old/extensions/vagrant.configure
@@ -0,0 +1,55 @@
+#!/bin/sh
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+if test "x$VAGRANT" = "x"; then
+ exit 0
+fi
+
+for needed in etc/ssh/sshd_config etc/sudoers; do
+ if ! test -e "$ROOT/$needed"; then
+ echo >&2 "Unable to find $needed"
+ echo >&2 "Cannot continue configuring as Vagrant basebox"
+ exit 1
+ fi
+done
+
+# SSH daemon needs to be configured to not use DNS...
+sed -i -e 's/^\(.*[Uu][Ss][Ee][Dd][Nn][Ss].*\)$/#\1/' "$ROOT/etc/ssh/sshd_config"
+echo "UseDNS no" >> "$ROOT/etc/ssh/sshd_config"
+
+# We need to add a vagrant user with "vagrant" as the password. We do this
+# manually rather than chrooting in to run adduser, because running commands
+# inside the target system is not generally possible at deployment time. (In
+# practice it would currently work, since we cannot deploy raw disks
+# cross-platform and expect extlinux to install anyway, but we avoid it for
+# good practice and to highlight this deficiency.)
+echo 'vagrant:x:1000:1000:Vagrant User:/home/vagrant:/bin/bash' >> "$ROOT/etc/passwd"
+echo 'vagrant:/6PTOoWylhw3w:16198:0:99999:7:::' >> "$ROOT/etc/shadow"
+echo 'vagrant:x:1000:' >> "$ROOT/etc/group"
+mkdir -p "$ROOT/home/vagrant"
+chown -R 1000:1000 "$ROOT/home/vagrant"
+
+# Next, the vagrant user is meant to have sudo access
+echo 'vagrant ALL=(ALL) NOPASSWD: ALL' >> "$ROOT/etc/sudoers"
+
+# And ensure that we get sbin in our path
+echo 'PATH="$PATH:/sbin:/usr/sbin"' >> "$ROOT/etc/profile"
+echo 'export PATH' >> "$ROOT/etc/profile"
+
diff --git a/old/extensions/vdaboot.configure b/old/extensions/vdaboot.configure
new file mode 100755
index 00000000..60de925b
--- /dev/null
+++ b/old/extensions/vdaboot.configure
@@ -0,0 +1,33 @@
+#!/bin/sh
+# Copyright (C) 2013,2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+# Change the "/" mount point to /dev/vda to use virtio disks.
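+# For example, an fstab line such as
+#     /dev/sda / btrfs defaults 0 1
+# is replaced with
+#     /dev/vda / btrfs defaults,rw,noatime 0 1
+# while entries for other mount points are copied through unchanged.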
+
+set -e
+
+if [ "$OPENSTACK_USER" ]
+then
+ # Modifying fstab
+ if [ -f "$1/etc/fstab" ]
+ then
+ mv "$1/etc/fstab" "$1/etc/fstab.old"
+ awk 'BEGIN {print "/dev/vda / btrfs defaults,rw,noatime 0 1"};
+ $2 != "/" {print $0 };' "$1/etc/fstab.old" > "$1/etc/fstab"
+ rm "$1/etc/fstab.old"
+ else
+ echo "/dev/vda / btrfs defaults,rw,noatime 0 1"> "$1/etc/fstab"
+ fi
+fi
diff --git a/old/extensions/virtualbox-ssh.check b/old/extensions/virtualbox-ssh.check
new file mode 100755
index 00000000..215c8b30
--- /dev/null
+++ b/old/extensions/virtualbox-ssh.check
@@ -0,0 +1,36 @@
+#!/usr/bin/python2
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''Preparatory checks for Morph 'virtualbox-ssh' write extension'''
+
+
+import writeexts
+
+
+class VirtualBoxPlusSshCheckExtension(writeexts.WriteExtension):
+ def process_args(self, args):
+ if len(args) != 1:
+ raise writeexts.ExtensionError(
+ 'Wrong number of command line args')
+
+ self.require_btrfs_in_deployment_host_kernel()
+
+ upgrade = self.get_environment_boolean('UPGRADE')
+ if upgrade:
+ raise writeexts.ExtensionError(
+ 'Use the `ssh-rsync` write extension to deploy upgrades to an '
+ 'existing remote system.')
+
+VirtualBoxPlusSshCheckExtension().run()
diff --git a/old/extensions/virtualbox-ssh.write b/old/extensions/virtualbox-ssh.write
new file mode 100755
index 00000000..56c0bb57
--- /dev/null
+++ b/old/extensions/virtualbox-ssh.write
@@ -0,0 +1,219 @@
+#!/usr/bin/python2
+# Copyright (C) 2012-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+'''A Morph deployment write extension for deploying to VirtualBox via ssh.
+
+VirtualBox is assumed to be running on a remote machine, which is
+accessed over ssh. The machine gets created, but not started.
+
+See file virtualbox-ssh.write.help for documentation
+
+'''
+
+
+import os
+import re
+import subprocess
+import sys
+import tempfile
+import time
+import urlparse
+
+import writeexts
+
+
+class VirtualBoxPlusSshWriteExtension(writeexts.WriteExtension):
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise writeexts.ExtensionError(
+ 'Wrong number of command line args')
+
+ temp_root, location = args
+ ssh_host, vm_name, vdi_path = self.parse_location(location)
+ autostart = self.get_environment_boolean('AUTOSTART')
+
+ vagrant = self.get_environment_boolean('VAGRANT')
+
+ fd, raw_disk = tempfile.mkstemp()
+ os.close(fd)
+ self.create_local_system(temp_root, raw_disk)
+
+ try:
+ self.transfer_and_convert_to_vdi(
+ raw_disk, ssh_host, vdi_path)
+ self.create_virtualbox_guest(ssh_host, vm_name, vdi_path,
+ autostart, vagrant)
+ except BaseException:
+            sys.stderr.write('Error deploying to VirtualBox\n')
+ os.remove(raw_disk)
+ writeexts.ssh_runcmd(ssh_host, ['rm', '-f', vdi_path])
+ raise
+ else:
+ os.remove(raw_disk)
+ self.status(
+ msg='Virtual machine %(vm_name)s has been created',
+ vm_name=vm_name)
+
+ def parse_location(self, location):
+ '''Parse the location argument to get relevant data.'''
+
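+        # For example (illustrative value from the help file):
+        #   vbox+ssh://alice@192.168.122.1/testsys/home/alice/testys.img
+        # parses to ('alice@192.168.122.1', 'testsys',
+        # '/home/alice/testys.img').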
+ x = urlparse.urlparse(location)
+ if x.scheme != 'vbox+ssh':
+ raise writeexts.ExtensionError(
+ 'URL schema must be vbox+ssh in %s' % location)
+ m = re.match('^/(?P<guest>[^/]+)(?P<path>/.+)$', x.path)
+ if not m:
+ raise writeexts.ExtensionError(
+ 'Cannot parse location %s' % location)
+ return x.netloc, m.group('guest'), m.group('path')
+
+ def transfer_and_convert_to_vdi(self, raw_disk, ssh_host, vdi_path):
+ '''Transfer raw disk image to VirtualBox host, and convert to VDI.'''
+
+ self.status(msg='Transfer disk and convert to VDI')
+
+ st = os.lstat(raw_disk)
+ # TODO: Something!
+ xfer_hole_path = writeexts.get_data_path('xfer-hole')
+ recv_hole = writeexts.get_data('recv-hole')
+
+ ssh_remote_cmd = [
+ 'sh', '-c', recv_hole,
+ 'dummy-argv0', 'vbox', vdi_path, str(st.st_size),
+ ]
+
+ xfer_hole_proc = subprocess.Popen(
+ ['python', xfer_hole_path, raw_disk],
+ stdout=subprocess.PIPE)
+ recv_hole_proc = subprocess.Popen(
+ ['ssh', ssh_host] + map(writeexts.shell_quote, ssh_remote_cmd),
+ stdin=xfer_hole_proc.stdout)
+ xfer_hole_proc.stdout.close()
+ recv_hole_proc.communicate()
+
+ def virtualbox_version(self, ssh_host):
+ 'Get the version number of the VirtualBox running on the remote host.'
+
+ # --version gives a build id, which looks something like
+ # 1.2.3r456789, so we need to strip the suffix off and get a tuple
+ # of the (major, minor, patch) version, since comparing with a
+ # tuple is more reliable than a string and more convenient than
+ # comparing against the major, minor and patch numbers directly
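+        # e.g. a build id of '1.2.3r456789' is reduced to (1, 2, 3)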
+ self.status(msg='Checking version of remote VirtualBox')
+ build_id = writeexts.ssh_runcmd(ssh_host,
+ ['VBoxManage', '--version'])
+ version_string = re.match(r"^([0-9\.]+).*$", build_id.strip()).group(1)
+ return tuple(int(s or '0') for s in version_string.split('.'))
+
+ def create_virtualbox_guest(self, ssh_host, vm_name, vdi_path, autostart,
+ vagrant):
+ '''Create the VirtualBox virtual machine.'''
+
+ self.status(msg='Create VirtualBox virtual machine')
+
+ ram_mebibytes = str(self.get_ram_size() / (1024**2))
+
+ vcpu_count = str(self.get_vcpu_count())
+
+ if not vagrant:
+ hostonly_iface = self.get_host_interface(ssh_host)
+
+ if self.virtualbox_version(ssh_host) < (4, 3, 0):
+ sataportcount_option = '--sataportcount'
+ else:
+ sataportcount_option = '--portcount'
+
+ commands = [
+ ['createvm', '--name', vm_name, '--ostype', 'Linux26_64',
+ '--register'],
+ ['modifyvm', vm_name, '--ioapic', 'on',
+ '--memory', ram_mebibytes, '--cpus', vcpu_count],
+ ['storagectl', vm_name, '--name', 'SATA Controller',
+ '--add', 'sata', '--bootable', 'on', sataportcount_option, '2'],
+ ['storageattach', vm_name, '--storagectl', 'SATA Controller',
+ '--port', '0', '--device', '0', '--type', 'hdd', '--medium',
+ vdi_path],
+ ]
+ if vagrant:
+ commands[1].extend(['--nic1', 'nat',
+ '--natnet1', 'default'])
+ else:
+ commands[1].extend(['--nic1', 'hostonly',
+ '--hostonlyadapter1', hostonly_iface,
+ '--nic2', 'nat', '--natnet2', 'default'])
+
+ attach_disks = self.parse_attach_disks()
+ for device_no, disk in enumerate(attach_disks, 1):
+ cmd = ['storageattach', vm_name,
+ '--storagectl', 'SATA Controller',
+ '--port', str(device_no),
+ '--device', '0',
+ '--type', 'hdd',
+ '--medium', disk]
+ commands.append(cmd)
+
+ if autostart:
+ commands.append(['startvm', vm_name])
+
+ for command in commands:
+ argv = ['VBoxManage'] + command
+ writeexts.ssh_runcmd(ssh_host, argv)
+
+ def get_host_interface(self, ssh_host):
+ host_ipaddr = os.environ.get('HOST_IPADDR')
+ netmask = os.environ.get('NETMASK')
+
+ if host_ipaddr is None:
+ raise writeexts.ExtensionError('HOST_IPADDR was not given')
+
+ if netmask is None:
+ raise writeexts.ExtensionError('NETMASK was not given')
+
+ # 'VBoxManage list hostonlyifs' retrieves a list with the hostonly
+ # interfaces on the host. For each interface, the following lines
+ # are shown on top:
+ #
+ # Name: vboxnet0
+ # GUID: 786f6276-656e-4074-8000-0a0027000000
+ # Dhcp: Disabled
+ # IPAddress: 192.168.100.1
+ #
+ # The following command tries to retrieve the hostonly interface
+ # name (e.g. vboxnet0) associated with the given ip address.
+ iface = None
+ lines = writeexts.ssh_runcmd(ssh_host,
+ ['VBoxManage', 'list', 'hostonlyifs']).splitlines()
+ for i, v in enumerate(lines):
+ if host_ipaddr in v:
+ iface = lines[i-3].split()[1]
+ break
+
+ if iface is None:
+ iface = writeexts.ssh_runcmd(ssh_host,
+ ['VBoxManage', 'hostonlyif', 'create'])
+ # 'VBoxManage hostonlyif create' shows the name of the
+ # created hostonly interface inside single quotes
+ iface = iface[iface.find("'") + 1 : iface.rfind("'")]
+ writeexts.ssh_runcmd(ssh_host,
+ ['VBoxManage', 'hostonlyif',
+ 'ipconfig', iface,
+ '--ip', host_ipaddr,
+ '--netmask', netmask])
+
+ return iface
+
+VirtualBoxPlusSshWriteExtension().run()
diff --git a/old/extensions/virtualbox-ssh.write.help b/old/extensions/virtualbox-ssh.write.help
new file mode 100644
index 00000000..2dbf988c
--- /dev/null
+++ b/old/extensions/virtualbox-ssh.write.help
@@ -0,0 +1,135 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+ Deploy a Baserock system as a *new* VirtualBox virtual machine.
+ (Use the `ssh-rsync` write extension to deploy upgrades to an *existing*
+ VM)
+
+ Connects to HOST via ssh to run VirtualBox's command line management tools.
+
+ Parameters:
+
+ * location: a custom URL scheme of the form `vbox+ssh://HOST/GUEST/PATH`,
+ where:
+ * HOST is the name of the host on which VirtualBox is running
+ * GUEST is the name of the guest VM on that host
+ * PATH is the path to the disk image that should be created,
+ on that host. For example,
+ `vbox+ssh://alice@192.168.122.1/testsys/home/alice/testys.img` where
+ * `alice@192.168.122.1` is the target host as given to ssh,
+ **from within the development host** (which may be
+ different from the target host's normal address);
+      * `testsys` is the name of the new guest VM;
+ * `/home/alice/testys.img` is the pathname of the disk image files
+ on the target host.
+
+ * HOSTNAME=name: the hostname of the **guest** VM within the network into
+ which it is being deployed.
+
+ * DISK_SIZE=X: **(MANDATORY)** the size of the VM's primary virtual hard
+ disk. `X` should use a suffix of `K`, `M`, or `G` (in upper or lower
+ case) to indicate kilo-, mega-, or gigabytes. For example,
+ `DISK_SIZE=100G` would create a 100 gigabyte virtual hard disk.
+
+ * RAM_SIZE=X: The amount of RAM that the virtual machine should allocate
+    for itself from the host. `X` is interpreted in the same way as for
+ DISK_SIZE, and defaults to `1G`.
+
+ * VCPUS=n: the number of virtual CPUs for the VM. Allowed values 1-32. Do
+ not use more CPU cores than you have available physically (real cores,
+ no hyperthreads).
+
+ * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to
+ tell Linux to use, rather than booting the rootfs directly.
+
+ * DTB_PATH=path: **(MANDATORY)** for systems that require a device tree
+ binary - Give the full path (without a leading /) to the location of the
+    DTB in the built system image. The deployment will fail if `path` does
+ not exist.
+
+ * BOOTLOADER_INSTALL=value: the bootloader to be installed
+ **(MANDATORY)** for non-x86 systems
+
+ allowed values =
+ - 'extlinux' (default) - the extlinux bootloader will
+ be installed
+ - 'none' - no bootloader will be installed by `morph deploy`. A
+ bootloader must be installed manually. This value must be used when
+ deploying non-x86 systems such as ARM.
+
+ * BOOTLOADER_CONFIG_FORMAT=value: the bootloader format to be used.
+ If not specified for x86-32 and x86-64 systems, 'extlinux' will be used
+
+ allowed values =
+ - 'extlinux'
+
+ * KERNEL_ARGS=args: optional additional kernel command-line parameters to
+ be appended to the default set. The default set is:
+
+ 'rw init=/sbin/init rootfstype=btrfs \
+ rootflags=subvol=systems/default/run \
+ root=[name or UUID of root filesystem]'
+
+ (See https://www.kernel.org/doc/Documentation/kernel-parameters.txt)
+
+ * AUTOSTART=<VALUE> - boolean. If it is set, the VM will be started when
+ it has been deployed.
+
+ * VAGRANT=<VALUE> - boolean. If it is set, then networking is configured
+ so that the VM will work with Vagrant. Otherwise networking is
+ configured to run directly in VirtualBox.
+
+ * HOST_IPADDR=<ip_address> - the IP address of the VM host.
+
+ * NETMASK=<netmask> - the netmask of the VM host.
+
+ * NETWORK_CONFIG=<net_config> - `net_config` is used to set up the VM's
+ network interfaces. It is a string containing semi-colon separated
+ 'stanzas' where each stanza provides information about a network
+ interface. Each stanza is of the form name:type[,arg=value] e.g.
+
+ lo:loopback
+ eth0:dhcp
+ eth1:static,address=10.0.0.1,netmask=255.255.0.0
+
+    An example of the NETWORK_CONFIG parameter (it should all be on one line):
+
+ `"lo:loopback;eth0:static,address=192.168.100.2,netmask=255.255.255.0;
+ eth1:dhcp,hostname=$(hostname)"`
+
+    It is useful to configure one interface to use NAT to give the VM access
+    to the outside world, and another interface to use the VirtualBox host
+    adapter so that you can reach the deployed system from the host machine.
+
+    The NAT interface eth1 is set up to use dhcp, while the host-only
+    adapter interface is configured statically.
+
+ Note: you must give the host-only adapter interface an address that lies
+ **on the same network** as the host adapter. So if the host adapter has
+ an IP of 192.168.100.1 eth0 should have an address such as
+ 192.168.100.42.
+
+ The settings of the host adapter, including its IP can be changed either
+ in the VirtualBox manager UI
+ (https://www.virtualbox.org/manual/ch03.html#settings-network)
+ or via the VBoxManage command line
+ (https://www.virtualbox.org/manual/ch08.html#idp57572192)
+
+ See Chapter 6 of the VirtualBox User Manual for more information about
+ virtual networking (https://www.virtualbox.org/manual/ch06.html)
+
+ (See `morph help deploy` for details of how to pass parameters to write
+ extensions)
diff --git a/old/extensions/writeexts.py b/old/extensions/writeexts.py
new file mode 100644
index 00000000..5b79093b
--- /dev/null
+++ b/old/extensions/writeexts.py
@@ -0,0 +1,1072 @@
+#!/usr/bin/python2
+# Copyright (C) 2012-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+import contextlib
+import errno
+import logging
+import os
+import re
+import shutil
+import stat
+import subprocess
+import sys
+import time
+import tempfile
+
+import partitioning
+import pyfdisk
+
+
+if sys.version_info >= (3, 3, 0):
+ import shlex
+ shell_quote = shlex.quote
+else:
+ import pipes
+ shell_quote = pipes.quote
+
+
+def get_data_path(relative_path):
+ extensions_dir = os.path.dirname(__file__)
+ return os.path.join(extensions_dir, relative_path)
+
+
+def get_data(relative_path):
+ with open(get_data_path(relative_path)) as f:
+ return f.read()
+
+
+def ssh_runcmd(host, args, **kwargs):
+ '''Run command over ssh'''
+ command = ['ssh', host, '--'] + [shell_quote(arg) for arg in args]
+
+ feed_stdin = kwargs.get('feed_stdin')
+ stdin = kwargs.get('stdin', subprocess.PIPE)
+ stdout = kwargs.get('stdout', subprocess.PIPE)
+ stderr = kwargs.get('stderr', subprocess.PIPE)
+
+ p = subprocess.Popen(command, stdin=stdin, stdout=stdout, stderr=stderr)
+ out, err = p.communicate(input=feed_stdin)
+ if p.returncode != 0:
+ raise ExtensionError('ssh command `%s` failed' % ' '.join(command))
+ return out
+
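+# Illustrative use of ssh_runcmd() (the host is hypothetical):
+#
+#     output = ssh_runcmd('root@192.168.122.1', ['ls', '/'])
+#
+# Arguments are passed through shell_quote(), so spaces and shell
+# metacharacters survive the trip through the remote shell.
+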
+
+def write_from_dict(filepath, d, validate=lambda x, y: True):
+ """Takes a dictionary and appends the contents to a file
+
+ An optional validation callback can be passed to perform validation on
+ each value in the dictionary.
+
+ e.g.
+
+ def validation_callback(dictionary_key, dictionary_value):
+ if not dictionary_value.isdigit():
+ raise Exception('value contains non-digit character(s)')
+
+ Any callback supplied to this function should raise an exception
+ if validation fails.
+
+ """
+    # Sort items asciibetically by value, so that the output of the
+    # deployment does not depend on the locale of the machine running
+    # the deployment.
+    items = sorted(d.iteritems(), key=lambda (k, v): [ord(c) for c in v])
+
+ for (k, v) in items:
+ validate(k, v)
+
+ with open(filepath, 'a') as f:
+ for (_, v) in items:
+ f.write('%s\n' % v)
+
+ os.fchown(f.fileno(), 0, 0)
+ os.fchmod(f.fileno(), 0644)
+
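+# A minimal sketch of calling write_from_dict(); the path, values and
+# callback are hypothetical:
+#
+#     def digits_only(key, value):
+#         if not value.isdigit():
+#             raise ExtensionError('%s is not numeric' % key)
+#
+#     write_from_dict('/tmp/example', {'a': '10', 'b': '2'}, digits_only)
+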
+
+def parse_environment_pairs(env, pairs):
+ '''Add key=value pairs to the environment dict.
+
+ Given a dict and a list of strings of the form key=value,
+ set dict[key] = value, unless key is already set in the
+ environment, at which point raise an exception.
+
+ This does not modify the passed in dict.
+
+ Returns the extended dict.
+
+ '''
+ extra_env = dict(p.split('=', 1) for p in pairs)
+ conflicting = [k for k in extra_env if k in env]
+ if conflicting:
+ raise ExtensionError('Environment already set: %s'
+ % ', '.join(conflicting))
+
+    # Return a dict that is the union of the two.
+    # This is not the most performant approach, since it creates
+    # three unnecessary lists, but it is the easiest to read.
+    # Using itertools.chain would be more efficient.
+ return dict(env.items() + extra_env.items())
+
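+# For example (hypothetical values):
+#
+#     env = parse_environment_pairs(dict(os.environ),
+#                                   ['DISK_SIZE=4G', 'AUTOSTART=yes'])
+#
+# raises ExtensionError if DISK_SIZE or AUTOSTART is already set, and
+# otherwise returns a new dict with both pairs added.
+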
+
+class ExtensionError(Exception):
+
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg
+
+
+class Fstab(object):
+ '''Small helper class for parsing and adding lines to /etc/fstab.'''
+
+ # There is an existing Python helper library for editing of /etc/fstab.
+ # However it is unmaintained and has an incompatible license (GPL3).
+ #
+ # https://code.launchpad.net/~computer-janitor-hackers/python-fstab/trunk
+
+ def __init__(self, filepath='/etc/fstab'):
+ if os.path.exists(filepath):
+ with open(filepath, 'r') as f:
+                self.text = f.read()
+ else:
+ self.text = ''
+ self.filepath = filepath
+ self.lines_added = 0
+
+ def get_mounts(self):
+        '''Return the mounts defined in /etc/fstab.
+
+        Return value is a dict mapping mount target -> device.
+        '''
+ mounts = dict()
+ for line in self.text.splitlines():
+ words = line.split()
+ if len(words) >= 2 and not words[0].startswith('#'):
+ device, target = words[0:2]
+ mounts[target] = device
+ return mounts
+
+ def add_line(self, line):
+ '''Add a new entry to /etc/fstab.
+
+ Lines are appended, and separated from any entries made by configure
+ extensions with a comment.
+
+ '''
+ if self.lines_added == 0:
+            if len(self.text) == 0 or self.text[-1] != '\n':
+ self.text += '\n'
+ self.text += '# Morph default system layout\n'
+ self.lines_added += 1
+
+ self.text += line + '\n'
+
+ def write(self):
+ '''Rewrite the fstab file to include all new entries.'''
+ with tempfile.NamedTemporaryFile(delete=False) as f:
+ f.write(self.text)
+ tmp = f.name
+ shutil.move(os.path.abspath(tmp), os.path.abspath(self.filepath))
+
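+# Typical usage, sketched with hypothetical values:
+#
+#     fstab = Fstab('/mnt/etc/fstab')
+#     if '/home' not in fstab.get_mounts():
+#         fstab.add_line('/dev/sda2 /home btrfs defaults,rw,noatime 0 2')
+#     fstab.write()
+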
+
+class Extension(object):
+
+ '''A base class for deployment extensions.
+
+    An extension should subclass this class and add a
+    ``process_args`` method.
+
+ Note that it is not necessary to subclass this class for write
+ extensions. This class is here just to collect common code for
+ write extensions.
+
+ '''
+
+ def setup_logging(self):
+ '''Direct all logging output to MORPH_LOG_FD, if set.
+
+ This file descriptor is read by Morph and written into its own log
+ file.
+
+ '''
+ log_write_fd = int(os.environ.get('MORPH_LOG_FD', 0))
+
+ if log_write_fd == 0:
+ return
+
+ formatter = logging.Formatter('%(message)s')
+
+ handler = logging.StreamHandler(os.fdopen(log_write_fd, 'w'))
+ handler.setFormatter(formatter)
+
+ logger = logging.getLogger()
+ logger.addHandler(handler)
+ logger.setLevel(logging.DEBUG)
+
+ def process_args(self, args):
+ raise NotImplementedError()
+
+ def run(self, args=None):
+ if args is None:
+ args = sys.argv[1:]
+ try:
+ self.setup_logging()
+ self.process_args(args)
+ except ExtensionError as e:
+ sys.stdout.write('ERROR: %s\n' % e)
+ sys.exit(1)
+
+ @staticmethod
+ def status(**kwargs):
+ '''Provide status output.
+
+ The ``msg`` keyword argument is the actual message,
+ the rest are values for fields in the message as interpolated
+ by %.
+
+ '''
+ sys.stdout.write('%s\n' % (kwargs['msg'] % kwargs))
+ sys.stdout.flush()
+
+
+class WriteExtension(Extension):
+
+ '''A base class for deployment write extensions.
+
+    A write extension should subclass this class and add a
+    ``process_args`` method.
+
+ Note that it is not necessary to subclass this class for write
+ extensions. This class is here just to collect common code for
+ write extensions.
+
+ '''
+
+ def check_for_btrfs_in_deployment_host_kernel(self):
+ with open('/proc/filesystems') as f:
+ text = f.read()
+ return '\tbtrfs\n' in text
+
+ def require_btrfs_in_deployment_host_kernel(self):
+ if not self.check_for_btrfs_in_deployment_host_kernel():
+ raise ExtensionError(
+ 'Error: Btrfs is required for this deployment, but was not '
+ 'detected in the kernel of the machine that is running Morph.')
+
+ def create_local_system(self, temp_root, location):
+ '''Create a raw system image locally.'''
+
+ with self.created_disk_image(location):
+ self.create_baserock_system(temp_root, location)
+
+ def create_baserock_system(self, temp_root, location):
+ if self.get_environment_boolean('USE_PARTITIONING', 'no'):
+ self.create_partitioned_system(temp_root, location)
+ else:
+ self.format_btrfs(location)
+ self.create_unpartitioned_system(temp_root, location)
+
+ @contextlib.contextmanager
+ def created_disk_image(self, location):
+ size = self.get_disk_size()
+ if not size:
+ raise ExtensionError('DISK_SIZE is not defined')
+ self.create_raw_disk_image(location, size)
+ try:
+ yield
+ except BaseException:
+ os.unlink(location)
+ raise
+
+ def format_btrfs(self, raw_disk):
+ try:
+ self.mkfs_btrfs(raw_disk)
+ except BaseException:
+            sys.stderr.write('Error creating disk image\n')
+ raise
+
+ def create_unpartitioned_system(self, temp_root, raw_disk):
+ '''Deploy a bootable Baserock system within a single Btrfs filesystem.
+
+ Called if USE_PARTITIONING=no (the default) is set in the deployment
+ options.
+
+ '''
+ with self.mount(raw_disk) as mp:
+ try:
+ self.create_versioned_layout(mp, version_label='factory')
+ self.create_btrfs_system_rootfs(
+ temp_root, mp, version_label='factory',
+ rootfs_uuid=self.get_uuid(raw_disk))
+ if self.bootloader_config_is_wanted():
+ self.create_bootloader_config(
+ temp_root, mp, version_label='factory',
+ rootfs_uuid=self.get_uuid(raw_disk))
+ except BaseException:
+                sys.stderr.write('Error creating Btrfs system layout\n')
+ raise
+
+ def _parse_size(self, size):
+ '''Parse a size from a string.
+
+ Return size in bytes.
+
+ '''
+
+        m = re.match(r'^(\d+)([kmgKMG]?)$', size)
+ if not m:
+ return None
+
+ factors = {
+ '': 1,
+ 'k': 1024,
+ 'm': 1024**2,
+ 'g': 1024**3,
+ }
+ factor = factors[m.group(2).lower()]
+
+ return int(m.group(1)) * factor
+
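+    # For example (sketch): _parse_size('4G') returns 4 * 1024**3,
+    # _parse_size('512k') returns 524288, and _parse_size('10 MB')
+    # returns None, because the string does not match the pattern.
+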
+ def _parse_size_from_environment(self, env_var, default):
+ '''Parse a size from an environment variable.'''
+
+ size = os.environ.get(env_var, default)
+ if size is None:
+ return None
+ bytes = self._parse_size(size)
+ if bytes is None:
+ raise ExtensionError('Cannot parse %s value %s'
+ % (env_var, size))
+ return bytes
+
+ def get_disk_size(self):
+ '''Parse disk size from environment.'''
+ return self._parse_size_from_environment('DISK_SIZE', None)
+
+ def get_ram_size(self):
+ '''Parse RAM size from environment.'''
+ return self._parse_size_from_environment('RAM_SIZE', '1G')
+
+ def get_vcpu_count(self):
+ '''Parse the virtual cpu count from environment.'''
+ return self._parse_size_from_environment('VCPUS', '1')
+
+ def create_raw_disk_image(self, filename, size):
+ '''Create a raw disk image.'''
+
+ self.status(msg='Creating empty disk image')
+ with open(filename, 'wb') as f:
+ if size > 0:
+ f.seek(size-1)
+ f.write('\0')
+
+ def mkfs_btrfs(self, location):
+ '''Create a btrfs filesystem on the disk.'''
+
+ self.status(msg='Creating btrfs filesystem')
+ try:
+ # The following command disables some new filesystem features. We
+ # need to do this because at the time of writing, SYSLINUX has not
+ # been updated to understand these new features and will fail to
+ # boot if the kernel is on a filesystem where they are enabled.
+ subprocess.check_output(
+                ['mkfs.btrfs', '-f', '-L', 'baserock',
+ '--features', '^extref',
+ '--features', '^skinny-metadata',
+ '--features', '^mixed-bg',
+ '--nodesize', '4096',
+ location], stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ if 'unrecognized option \'--features\'' in e.output:
+ # Old versions of mkfs.btrfs (including v0.20, present in many
+ # Baserock releases) don't support the --features option, but
+ # also don't enable the new features by default. So we can
+ # still create a bootable system in this situation.
+ logging.debug(
+ 'Assuming mkfs.btrfs failure was because the tool is too '
+ 'old to have --features flag.')
+                subprocess.check_call(['mkfs.btrfs', '-f',
+                                       '-L', 'baserock', location])
+ else:
+ raise
+
+ def get_uuid(self, location, offset=0):
+ '''Get the filesystem UUID of a block device's file system.
+
+ Requires util-linux blkid; the busybox version ignores options and
+ lies by exiting successfully.
+
+ Args:
+ location: Path of device or image to inspect
+ offset: A byte offset - which should point to the start of a
+ partition containing a filesystem
+ '''
+
+ return subprocess.check_output(['blkid', '-s', 'UUID', '-o',
+ 'value', '-p', '-O', str(offset),
+ location]).strip()
+
+ @contextlib.contextmanager
+ def mount(self, location):
+ self.status(msg='Mounting filesystem')
+ try:
+ mount_point = tempfile.mkdtemp()
+ if self.is_device(location):
+ subprocess.check_call(['mount', location, mount_point])
+ else:
+ subprocess.check_call(['mount', '-o', 'loop',
+ location, mount_point])
+ except BaseException:
+            sys.stderr.write('Error mounting filesystem\n')
+ os.rmdir(mount_point)
+ raise
+ try:
+ yield mount_point
+ finally:
+ self.status(msg='Unmounting filesystem')
+ subprocess.check_call(['umount', mount_point])
+ os.rmdir(mount_point)
+
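+    # A sketch of how mount() is used by the methods below
+    # ('disk.img' and do_something_with() are hypothetical):
+    #
+    #     with self.mount('disk.img') as mp:
+    #         do_something_with(mp)  # filesystem is mounted at mp
+    #
+    # The filesystem is unmounted and the mount point removed on
+    # exit, even if the body raises.
+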
+ def create_versioned_layout(self, mountpoint, version_label):
+ '''Create a versioned directory structure within a partition.
+
+ The Baserock project has defined a 'reference upgrade mechanism'. This
+ mandates a specific directory layout. It consists of a toplevel
+ '/systems' directory, containing subdirectories named with a 'version
+ label'. These subdirectories contain the actual OS content.
+
+ For the root file system, a Btrfs partition must be used. For each
+ version, two subvolumes are created: 'orig' and 'run'. This is handled
+ in create_btrfs_system_rootfs().
+
+ Other partitions (e.g. /boot) can also follow the same layout. In the
+ case of /boot, content goes directly in the version directory. That
+ means there are no 'orig' and 'run' subvolumes, which avoids the
+ need to use Btrfs.
+
+ The `system-version-manager` tool from tbdiff.git is responsible for
+ deploying live upgrades, and it understands this layout.
+
+ '''
+ version_root = os.path.join(mountpoint, 'systems', version_label)
+
+ os.makedirs(version_root)
+ os.symlink(
+ version_label, os.path.join(mountpoint, 'systems', 'default'))
+
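+    # For version_label='factory' on the root partition, the layout
+    # created here (plus create_btrfs_system_rootfs()) looks like:
+    #
+    #     systems/
+    #         default -> factory    (symlink)
+    #         factory/
+    #             orig/             (subvolume, see create_orig())
+    #             run/              (snapshot, see create_run())
+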
+ def create_btrfs_system_rootfs(self, temp_root, mountpoint, version_label,
+ rootfs_uuid, device=None):
+ '''Separate base OS versions from state using subvolumes.
+
+ The 'device' parameter should be a pyfdisk.Device instance,
+ as returned by partitioning.do_partitioning(), that describes the
+ partition layout of the target device. This is used to set up
+ mountpoints in the root partition for the other partitions.
+ If no 'device' instance is passed, no mountpoints are set up in the
+ rootfs.
+
+ '''
+ version_root = os.path.join(mountpoint, 'systems', version_label)
+ state_root = os.path.join(mountpoint, 'state')
+ os.makedirs(state_root)
+
+ system_dir = self.create_orig(version_root, temp_root)
+ state_dirs = self.complete_fstab_for_btrfs_layout(system_dir,
+ rootfs_uuid, device)
+
+ for state_dir in state_dirs:
+ self.create_state_subvolume(system_dir, mountpoint, state_dir)
+
+ self.create_run(version_root)
+
+ if device:
+ self.create_partition_mountpoints(device, system_dir)
+
+ def create_bootloader_config(self, temp_root, mountpoint, version_label,
+ rootfs_uuid, device=None):
+ '''Setup the bootloader.
+
+ '''
+ initramfs = self.find_initramfs(temp_root)
+ version_root = os.path.join(mountpoint, 'systems', version_label)
+
+ self.install_kernel(version_root, temp_root)
+ if self.get_dtb_path() != '':
+ self.install_dtb(version_root, temp_root)
+ self.install_syslinux_menu(mountpoint, temp_root)
+ if initramfs is not None:
+ # Using initramfs - can boot a rootfs with a filesystem UUID
+ self.install_initramfs(initramfs, version_root)
+ self.generate_bootloader_config(mountpoint,
+ rootfs_uuid=rootfs_uuid)
+ else:
+ if device:
+ # A partitioned disk or image - boot with partition UUID
+ root_part = device.get_partition_by_mountpoint('/')
+ root_guid = device.get_partition_uuid(root_part)
+ self.generate_bootloader_config(mountpoint,
+ root_guid=root_guid)
+ else:
+ # Unpartitioned and no initramfs - cannot boot with a UUID
+ self.generate_bootloader_config(mountpoint)
+ self.install_bootloader(mountpoint)
+
+ def create_partition_mountpoints(self, device, system_dir):
+ '''Create (or empty) partition mountpoints in the root filesystem
+
+ Delete contents of partition mountpoints in the rootfs to leave an
+        empty mount directory (files are copied to the actual partition in
+ create_partitioned_system()), or create an empty mount directory in
+ the rootfs if the mount path doesn't exist.
+
+ Args:
+ device: A pyfdisk.py Device object describing the partitioning
+ system_dir: A path to the Baserock rootfs to be modified
+ '''
+
+ for part in device.partitionlist:
+ if hasattr(part, 'mountpoint') and part.mountpoint != '/':
+ part_mount_dir = os.path.join(system_dir,
+ re.sub('^/', '', part.mountpoint))
+ if os.path.exists(part_mount_dir):
+ self.status(msg='Deleting files in mountpoint '
+ 'for %s partition' % part.mountpoint)
+ self.empty_dir(part_mount_dir)
+ else:
+ self.status(msg='Creating empty mount directory '
+ 'for %s partition' % part.mountpoint)
+ os.mkdir(part_mount_dir)
+
+ def create_orig(self, version_root, temp_root):
+ '''Create the default "factory" system.'''
+
+ orig = os.path.join(version_root, 'orig')
+
+ self.status(msg='Creating orig subvolume')
+ subprocess.check_call(['btrfs', 'subvolume', 'create', orig])
+ self.status(msg='Copying files to orig subvolume')
+ subprocess.check_call(['cp', '-a', temp_root + '/.', orig + '/.'])
+
+ return orig
+
+ def create_run(self, version_root):
+ '''Create the 'run' snapshot.'''
+
+ self.status(msg='Creating run subvolume')
+ orig = os.path.join(version_root, 'orig')
+ run = os.path.join(version_root, 'run')
+ subprocess.check_call(
+ ['btrfs', 'subvolume', 'snapshot', orig, run])
+
+ def create_state_subvolume(self, system_dir, mountpoint, state_subdir):
+ '''Create a shared state subvolume.
+
+ We need to move any files added to the temporary rootfs by the
+ configure extensions to their correct home. For example, they might
+ have added keys in `/root/.ssh` which we now need to transfer to
+ `/state/root/.ssh`.
+
+ '''
+ self.status(msg='Creating %s subvolume' % state_subdir)
+ subvolume = os.path.join(mountpoint, 'state', state_subdir)
+ subprocess.check_call(['btrfs', 'subvolume', 'create', subvolume])
+ os.chmod(subvolume, 0o755)
+
+ existing_state_dir = os.path.join(system_dir, state_subdir)
+ self.move_dir_contents(existing_state_dir, subvolume)
+
+ def move_dir_contents(self, source_dir, target_dir):
+        '''Move all files from source_dir to target_dir.'''
+
+ n = self.__cmd_files_in_dir(['mv'], source_dir, target_dir)
+ if n:
+ self.status(msg='Moved %d files to %s' % (n, target_dir))
+
+ def copy_dir_contents(self, source_dir, target_dir):
+        '''Copy all files from source_dir to target_dir.'''
+
+ n = self.__cmd_files_in_dir(['cp', '-a', '-r'], source_dir, target_dir)
+ if n:
+ self.status(msg='Copied %d files to %s' % (n, target_dir))
+
+ def empty_dir(self, directory):
+ '''Empty the contents of a directory, but not the directory itself'''
+
+ n = self.__cmd_files_in_dir(['rm', '-rf'], directory)
+ if n:
+ self.status(msg='Deleted %d files in %s' % (n, directory))
+
+ def __cmd_files_in_dir(self, cmd, source_dir, target_dir=None):
+ files = []
+ if os.path.exists(source_dir):
+ files = os.listdir(source_dir)
+ for filename in files:
+ filepath = os.path.join(source_dir, filename)
+ add_params = [filepath, target_dir] if target_dir else [filepath]
+ subprocess.check_call(cmd + add_params)
+ return len(files)
+
+ def complete_fstab_for_btrfs_layout(self, system_dir,
+ rootfs_uuid=None, device=None):
+ '''Fill in /etc/fstab entries for the default Btrfs disk layout.
+
+ In the future we should move this code out of the write extension and
+        into a configure extension. To do that, though, we need some way of
+ informing the configure extension what layout should be used. Right now
+ a configure extension doesn't know if the system is going to end up as
+ a Btrfs disk image, a tarfile or something else and so it can't come
+ up with a sensible default fstab.
+
+ Configuration extensions can already create any /etc/fstab that they
+ like. This function only fills in entries that are missing, so if for
+ example the user configured /home to be on a separate partition, that
+ decision will be honoured and /state/home will not be created.
+
+ '''
+ shared_state_dirs = {'home', 'root', 'opt', 'srv', 'var'}
+
+ fstab = Fstab(os.path.join(system_dir, 'etc', 'fstab'))
+ existing_mounts = fstab.get_mounts()
+
+ if '/' in existing_mounts:
+ root_device = existing_mounts['/']
+ else:
+ root_device = (self.get_root_device() if rootfs_uuid is None else
+ 'UUID=%s' % rootfs_uuid)
+ fstab.add_line('%s / btrfs defaults,rw,noatime 0 1' % root_device)
+
+ # Add fstab entries for partitions
+ part_mountpoints = set()
+ if device:
+ mount_parts = set(p for p in device.partitionlist
+ if hasattr(p, 'mountpoint') and p.mountpoint != '/')
+ for part in mount_parts:
+ if part.mountpoint not in existing_mounts:
+ # Get filesystem UUID
+ part_uuid = self.get_uuid(device.location,
+ part.extent.start *
+ device.sector_size)
+ self.status(msg='Adding fstab entry for %s '
+ 'partition' % part.mountpoint)
+ fstab.add_line('UUID=%s %s %s defaults,rw,noatime '
+ '0 2' % (part_uuid, part.mountpoint,
+ part.filesystem))
+ part_mountpoints.add(part.mountpoint)
+ else:
+ self.status(msg='WARNING: an entry already exists in '
+ 'fstab for %s partition, skipping' %
+ part.mountpoint)
+
+ # Add entries for state dirs
+ all_mountpoints = set(existing_mounts.keys()) | part_mountpoints
+ state_dirs_to_create = set()
+ for state_dir in shared_state_dirs:
+ mp = '/' + state_dir
+ if mp not in all_mountpoints:
+ state_dirs_to_create.add(state_dir)
+ state_subvol = os.path.join('/state', state_dir)
+ fstab.add_line(
+ '%s /%s btrfs subvol=%s,defaults,rw,noatime 0 2' %
+ (root_device, state_dir, state_subvol))
+
+ fstab.write()
+ return state_dirs_to_create
+
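+    # For an unpartitioned image with a known rootfs UUID, the entries
+    # added here look something like this (UUIDs abbreviated):
+    #
+    #     UUID=... / btrfs defaults,rw,noatime 0 1
+    #     UUID=... /home btrfs subvol=/state/home,defaults,rw,noatime 0 2
+    #
+    # and similarly for the other shared state directories.
+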
+ def find_initramfs(self, temp_root):
+ '''Check whether the rootfs has an initramfs.
+
+ Uses the INITRAMFS_PATH option to locate it.
+ '''
+ if 'INITRAMFS_PATH' in os.environ:
+ initramfs = os.path.join(temp_root, os.environ['INITRAMFS_PATH'])
+ if not os.path.exists(initramfs):
+ raise ExtensionError('INITRAMFS_PATH specified, '
+ 'but file does not exist')
+ return initramfs
+ return None
+
+ def install_initramfs(self, initramfs_path, version_root):
+ '''Install the initramfs outside of 'orig' or 'run' subvolumes.
+
+ This is required because syslinux doesn't traverse subvolumes when
+ loading the kernel or initramfs.
+ '''
+ self.status(msg='Installing initramfs')
+ initramfs_dest = os.path.join(version_root, 'initramfs')
+ subprocess.check_call(['cp', '-a', initramfs_path, initramfs_dest])
+
+ def install_kernel(self, version_root, temp_root):
+ '''Install the kernel outside of 'orig' or 'run' subvolumes'''
+
+ self.status(msg='Installing kernel')
+ image_names = ['vmlinuz', 'zImage', 'uImage']
+ kernel_dest = os.path.join(version_root, 'kernel')
+ for name in image_names:
+ try_path = os.path.join(temp_root, 'boot', name)
+ if os.path.exists(try_path):
+ subprocess.check_call(['cp', '-a', try_path, kernel_dest])
+ break
+
+ def install_dtb(self, version_root, temp_root):
+ '''Install the device tree outside of 'orig' or 'run' subvolumes'''
+
+ self.status(msg='Installing devicetree')
+ device_tree_path = self.get_dtb_path()
+ dtb_dest = os.path.join(version_root, 'dtb')
+ try_path = os.path.join(temp_root, device_tree_path)
+ if os.path.exists(try_path):
+ subprocess.check_call(['cp', '-a', try_path, dtb_dest])
+ else:
+ logging.error("Failed to find device tree %s", device_tree_path)
+ raise ExtensionError(
+ 'Failed to find device tree %s' % device_tree_path)
+
+ def get_dtb_path(self):
+ return os.environ.get('DTB_PATH', '')
+
+ def get_bootloader_install(self):
+ # Do we actually want to install the bootloader?
+ # Set this to "none" to prevent the install
+ return os.environ.get('BOOTLOADER_INSTALL', 'extlinux')
+
+ def get_bootloader_config_format(self):
+ # The config format for the bootloader,
+ # if not set we default to extlinux for x86
+ return os.environ.get('BOOTLOADER_CONFIG_FORMAT', 'extlinux')
+
+ def get_extra_kernel_args(self):
+ return os.environ.get('KERNEL_ARGS', '')
+
+ def get_root_device(self):
+ return os.environ.get('ROOT_DEVICE', '/dev/sda')
+
+ def generate_bootloader_config(self, *args, **kwargs):
+ '''Install extlinux on the newly created disk image.'''
+ config_function_dict = {
+ 'extlinux': self.generate_extlinux_config,
+ }
+
+ config_type = self.get_bootloader_config_format()
+ if config_type in config_function_dict:
+ config_function_dict[config_type](*args, **kwargs)
+ else:
+ raise ExtensionError(
+ 'Invalid BOOTLOADER_CONFIG_FORMAT %s' % config_type)
+
+ def generate_extlinux_config(self, real_root,
+ rootfs_uuid=None, root_guid=None):
+ '''Generate the extlinux configuration file
+
+ Args:
+ real_root: Path to the mounted top level of the root filesystem
+ rootfs_uuid: Specify a filesystem UUID which can be loaded using
+ an initramfs aware of filesystems
+ root_guid: Specify a partition GUID, can be used without an
+ initramfs
+ '''
+
+ self.status(msg='Creating extlinux.conf')
+ # To be compatible with u-boot, create the extlinux.conf file in
+ # /extlinux/ rather than /
+ # Syslinux, however, requires this to be in /, so create a symlink
+ # as well
+ config_path = os.path.join(real_root, 'extlinux')
+ os.makedirs(config_path)
+ config = os.path.join(config_path, 'extlinux.conf')
+ os.symlink('extlinux/extlinux.conf', os.path.join(real_root,
+ 'extlinux.conf'))
+
+        # Please also update the documentation in the following files
+        # if you change these default kernel args:
+        #  - kvm.write.help
+        #  - rawdisk.write.help
+        #  - virtualbox-ssh.write.help
+ kernel_args = (
+ 'rw ' # ro ought to work, but we don't test that regularly
+ 'init=/sbin/init ' # default, but it doesn't hurt to be explicit
+ 'rootfstype=btrfs ' # required when using initramfs, also boots
+ # faster when specified without initramfs
+ 'rootflags=subvol=systems/default/run ') # boot runtime subvol
+
+ # See init/do_mounts.c:182 in the kernel source, in the comment above
+ # function name_to_dev_t(), for an explanation of the available
+ # options for the kernel parameter 'root', particularly when using
+ # GUID/UUIDs
+ if rootfs_uuid:
+ root_device = 'UUID=%s' % rootfs_uuid
+ elif root_guid:
+ root_device = 'PARTUUID=%s' % root_guid
+ else:
+ # Fall back to the root partition named in the cluster
+ root_device = self.get_root_device()
+ kernel_args += 'root=%s ' % root_device
+
+ kernel_args += self.get_extra_kernel_args()
+ with open(config, 'w') as f:
+ f.write('default linux\n')
+ f.write('timeout 1\n')
+ f.write('label linux\n')
+ f.write('kernel /systems/default/kernel\n')
+ if rootfs_uuid is not None:
+ f.write('initrd /systems/default/initramfs\n')
+ if self.get_dtb_path() != '':
+ f.write('devicetree /systems/default/dtb\n')
+ f.write('append %s\n' % kernel_args)
+
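+    # With an initramfs present and no extra KERNEL_ARGS, the generated
+    # extlinux.conf reads roughly as follows (UUID abbreviated; the
+    # append arguments are all on one line in the real file):
+    #
+    #     default linux
+    #     timeout 1
+    #     label linux
+    #     kernel /systems/default/kernel
+    #     initrd /systems/default/initramfs
+    #     append rw init=/sbin/init rootfstype=btrfs
+    #         rootflags=subvol=systems/default/run root=UUID=...
+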
+ def install_bootloader(self, *args, **kwargs):
+ install_function_dict = {
+ 'extlinux': self.install_bootloader_extlinux,
+ }
+
+ install_type = self.get_bootloader_install()
+ if install_type in install_function_dict:
+ install_function_dict[install_type](*args, **kwargs)
+ elif install_type != 'none':
+ raise ExtensionError(
+ 'Invalid BOOTLOADER_INSTALL %s' % install_type)
+
+ def install_bootloader_extlinux(self, real_root):
+ self.status(msg='Installing extlinux')
+ subprocess.check_call(['extlinux', '--install', real_root])
+
+ # FIXME this hack seems to be necessary to let extlinux finish
+ subprocess.check_call(['sync'])
+ time.sleep(2)
+
+ def install_syslinux_blob(self, device, orig_root):
+ '''Install Syslinux MBR blob
+
+ This is the first stage of boot (for partitioned images) on x86
+ machines. It is not required where there is no partition table. The
+ syslinux bootloader is written to the MBR, and is capable of loading
+ extlinux. This only works when the partition is set as bootable (MBR),
+ or the legacy boot flag is set (GPT). The blob is built with extlinux,
+ and found in the rootfs'''
+
+        pt_format = device.partition_table_format.lower()
+        if pt_format in ('gpb', 'mbr'):
+            blob = 'mbr.bin'
+        elif pt_format == 'gpt':
+            blob = 'gptmbr.bin'
+        else:
+            raise ExtensionError(
+                'Unexpected partition table format %s' % pt_format)
+ blob_name = 'usr/share/syslinux/' + blob
+ self.status(msg='Installing syslinux %s blob' % pt_format.upper())
+ blob_location = os.path.join(orig_root, blob_name)
+ if os.path.exists(blob_location):
+ subprocess.check_call(['dd', 'if=%s' % blob_location,
+ 'of=%s' % device.location,
+ 'bs=440', 'count=1', 'conv=notrunc'])
+ else:
+            raise ExtensionError(
+                'MBR blob not found. Is this the correct architecture? '
+                'The MBR blob will only be built for x86 systems. '
+                'You may wish to configure BOOTLOADER_INSTALL.')
+
+ def install_syslinux_menu(self, real_root, temp_root):
+ '''Make syslinux/extlinux menu binary available.
+
+ The syslinux boot menu is compiled to a file named menu.c32. Extlinux
+ searches a few places for this file but it does not know to look inside
+ our subvolume, so we copy it to the filesystem root.
+
+ If the file is not available, the bootloader will still work but will
+ not be able to show a menu.
+
+ '''
+ menu_file = os.path.join(temp_root, 'usr', 'share', 'syslinux',
+ 'menu.c32')
+ if os.path.isfile(menu_file):
+ self.status(msg='Copying menu.c32')
+ shutil.copy(menu_file, real_root)
+
+ def parse_attach_disks(self):
+ '''Parse $ATTACH_DISKS into list of disks to attach.'''
+
+ if 'ATTACH_DISKS' in os.environ:
+ s = os.environ['ATTACH_DISKS']
+ return s.split(':')
+ else:
+ return []
+
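+    # e.g. ATTACH_DISKS='/src/tmp/a.img:/src/tmp/b.img' (hypothetical
+    # paths) gives ['/src/tmp/a.img', '/src/tmp/b.img'].
+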
+ def bootloader_config_is_wanted(self):
+ '''Does the user want to generate a bootloader config?
+
+ The user may set $BOOTLOADER_CONFIG_FORMAT to the desired
+ format. 'extlinux' is the only allowed value, and is the default
+ value for x86-32 and x86-64.
+
+ '''
+
+ def is_x86(arch):
+ return (arch == 'x86_64' or
+ (arch.startswith('i') and arch.endswith('86')))
+
+ value = os.environ.get('BOOTLOADER_CONFIG_FORMAT', '')
+ if value == '':
+ if not is_x86(os.uname()[-1]):
+ return False
+
+ return True
+
+ def get_environment_boolean(self, variable, default='no'):
+ '''Parse a yes/no boolean passed through the environment.'''
+
+ value = os.environ.get(variable, default).lower()
+ if value in ('no', '0', 'false'):
+ return False
+ elif value in ('yes', '1', 'true'):
+ return True
+ else:
+ raise ExtensionError('Unexpected value for %s: %s' %
+ (variable, value))
+
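+    # For example, FOO=yes, FOO=1 and FOO=TRUE (the value is lowercased
+    # first) all parse as True; an unset variable falls back to the
+    # default; any other value raises ExtensionError.
+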
+ def check_ssh_connectivity(self, ssh_host):
+ try:
+ output = ssh_runcmd(ssh_host, ['echo', 'test'])
+ except ExtensionError as e:
+ logging.error("Error checking SSH connectivity: %s", str(e))
+ raise ExtensionError(
+ 'Unable to SSH to %s: %s' % (ssh_host, e))
+
+ if output.strip() != 'test':
+ raise ExtensionError(
+ 'Unexpected output from remote machine: %s' % output.strip())
+
+ def is_device(self, location):
+ try:
+ st = os.stat(location)
+ return stat.S_ISBLK(st.st_mode)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ return False
+ raise
+
+ def create_partitioned_system(self, temp_root, location):
+ '''Deploy a bootable Baserock system with a custom partition layout.
+
+ Called if USE_PARTITIONING=yes is set in the deployment options.
+
+ '''
+ part_spec = os.environ.get('PARTITION_FILE', 'partitioning/default')
+
+ disk_size = self.get_disk_size()
+ if not disk_size:
+            raise ExtensionError('DISK_SIZE is not defined')
+
+ dev = partitioning.do_partitioning(location, disk_size,
+ temp_root, part_spec)
+ boot_partition_available = dev.get_partition_by_mountpoint('/boot')
+
+ for part in dev.partitionlist:
+ if not hasattr(part, 'mountpoint'):
+ continue
+ if part.mountpoint == '/':
+ # Re-format the rootfs, to include needed extra features
+ with pyfdisk.create_loopback(location,
+ part.extent.start *
+ dev.sector_size, part.size) as l:
+ self.mkfs_btrfs(l)
+
+ self.status(msg='Mounting partition %d' % part.number)
+ offset = part.extent.start * dev.sector_size
+ with self.mount_partition(location,
+ offset, part.size) as part_mount_dir:
+ if part.mountpoint == '/':
+ # Install root filesystem
+ rfs_uuid = self.get_uuid(location, part.extent.start *
+ dev.sector_size)
+ self.create_versioned_layout(part_mount_dir, 'factory')
+ self.create_btrfs_system_rootfs(temp_root, part_mount_dir,
+ 'factory', rfs_uuid, dev)
+
+ # If there's no /boot partition, but we do need to generate
+ # a bootloader configuration file, then it needs to go in
+ # the root partition.
+ if (boot_partition_available is False
+ and self.bootloader_config_is_wanted()):
+ self.create_bootloader_config(
+ temp_root, part_mount_dir, 'factory', rfs_uuid,
+ dev)
+
+ if self.get_bootloader_install() == 'extlinux':
+ # The extlinux/syslinux MBR blob always needs to be
+ # installed in the root partition.
+ self.install_syslinux_blob(dev, temp_root)
+ else:
+ # Copy files to partition from unpacked rootfs
+ src_dir = os.path.join(temp_root,
+ re.sub('^/', '', part.mountpoint))
+ self.status(msg='Copying files to %s partition' %
+ part.mountpoint)
+ self.copy_dir_contents(src_dir, part_mount_dir)
+
+ if (part.mountpoint == '/boot' and
+ self.bootloader_config_is_wanted()):
+ # We need to mirror the layout of the root partition in the
+ # /boot partition. Each kernel lives in its own
+ # systems/$version_label/ directory within the /boot
+ # partition.
+ self.create_versioned_layout(part_mount_dir, 'factory')
+ self.create_bootloader_config(temp_root, part_mount_dir,
+ 'factory', None, dev)
+
+ # Write raw files to disk with dd
+ partitioning.process_raw_files(dev, temp_root)
+
+ @contextlib.contextmanager
+ def mount_partition(self, location, offset_bytes, size_bytes):
+ '''Mount a partition in a partitioned device or image'''
+
+ with pyfdisk.create_loopback(location, offset=offset_bytes,
+ size=size_bytes) as loop:
+ with self.mount(loop) as mountpoint:
+ yield mountpoint
+
+ @contextlib.contextmanager
+ def find_and_mount_rootfs(self, location):
+ '''
+ Mount a Baserock rootfs inside a partitioned device or image
+
+ This function searches a disk image or device, with unknown
+ partitioning scheme, for a Baserock rootfs. This is done by finding
+ offsets and sizes of partitions in the partition table, mounting each
+ partition, and checking whether a known path exists in the mount.
+
+ Args:
+ location: the location of the disk image or device to search
+ Returns:
+ A path to the mount point of the mounted Baserock rootfs
+ '''
+
+        if pyfdisk.get_pt_type(location) == 'none':
+            with self.mount(location) as mountpoint:
+                yield mountpoint
+            return
+
+ sector_size = pyfdisk.get_sector_size(location)
+ partn_sizes = pyfdisk.get_partition_sector_sizes(location)
+ for i, offset in enumerate(pyfdisk.get_partition_offsets(location)):
+ try:
+ with self.mount_partition(location, offset * sector_size,
+ partn_sizes[i] * sector_size) as mp:
+ path = os.path.join(mp, 'systems/default/orig/baserock')
+ if os.path.exists(path):
+ self.status(msg='Found a Baserock rootfs at '
+ 'offset %d sectors/%d bytes' %
+ (offset, offset * sector_size))
+ yield mp
+ except BaseException:
+ # Probably a partition without a filesystem, carry on
+ pass
diff --git a/old/extensions/xfer-hole b/old/extensions/xfer-hole
new file mode 100755
index 00000000..91f1be01
--- /dev/null
+++ b/old/extensions/xfer-hole
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+#
+# Send a sparse file more space-efficiently.
+# See recv-hole for a description of the protocol.
+#
+# Note that xfer-hole requires a version of Linux with support for
+# SEEK_DATA and SEEK_HOLE.
+#
+#
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# =*= License: GPL-2 =*=
+
+
+
+import errno
+import os
+import sys
+
+
+SEEK_DATA = 3
+SEEK_HOLE = 4
+
+
+filename = sys.argv[1]
+fd = os.open(filename, os.O_RDONLY)
+pos = 0
+
+
+DATA = 'data'
+HOLE = 'hole'
+EOF = 'eof'
+
+
+def safe_lseek(fd, pos, whence):
+ try:
+ return os.lseek(fd, pos, whence)
+ except OSError as e:
+ if e.errno == errno.ENXIO:
+ return -1
+ raise
+
+
+def current_data_or_hole(fd, pos):
+ length = safe_lseek(fd, 0, os.SEEK_END)
+ next_data = safe_lseek(fd, pos, SEEK_DATA)
+ next_hole = safe_lseek(fd, pos, SEEK_HOLE)
+
+ if pos == length:
+ return EOF, pos
+ elif pos == next_data:
+ return DATA, pos
+ elif pos == next_hole:
+ return HOLE, pos
+ else:
+ assert False, \
+ ("Do not understand: pos=%d next_data=%d next_hole=%d" %
+ (pos, next_data, next_hole))
+
+
+def next_data_or_hole(fd, pos):
+ length = safe_lseek(fd, 0, os.SEEK_END)
+ next_data = safe_lseek(fd, pos, SEEK_DATA)
+ next_hole = safe_lseek(fd, pos, SEEK_HOLE)
+
+ if pos == length:
+ return EOF, pos
+ elif pos == next_data:
+ # We are at data.
+ if next_hole == -1 or next_hole == length:
+ return EOF, length
+ else:
+ return HOLE, next_hole
+ elif pos == next_hole:
+ # We are at a hole.
+ if next_data == -1 or next_data == length:
+ return EOF, length
+ else:
+ return DATA, next_data
+ else:
+ assert False, \
+ ("Do not understand: pos=%d next_data=%d next_hole=%d" %
+ (pos, next_data, next_hole))
+
+
+def find_data_and_holes(fd):
+ pos = safe_lseek(fd, 0, os.SEEK_CUR)
+
+    kind, pos = current_data_or_hole(fd, pos)
+ while kind != EOF:
+ yield kind, pos
+ kind, pos = next_data_or_hole(fd, pos)
+ yield kind, pos
+
+
+def make_xfer_instructions(fd):
+ prev_kind = None
+ prev_pos = None
+ for kind, pos in find_data_and_holes(fd):
+ if prev_kind == DATA:
+ yield (DATA, prev_pos, pos)
+ elif prev_kind == HOLE:
+ yield (HOLE, prev_pos, pos)
+ prev_kind = kind
+ prev_pos = pos
+
+
+def copy_slice_from_file(to, fd, start, end):
+ safe_lseek(fd, start, os.SEEK_SET)
+ nbytes = end - start
+ max_at_a_time = 1024**2
+ while nbytes > 0:
+ data = os.read(fd, min(nbytes, max_at_a_time))
+ if not data:
+ break
+ to.write(data)
+ nbytes -= len(data)
+
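+# The loop below emits the stream that recv-hole consumes. For a
+# hypothetical file with 4 KiB of data, an 8 KiB hole, then 4 KiB more
+# data, the output would be:
+#
+#     DATA
+#     4096
+#     <4096 bytes of raw data>
+#     HOLE
+#     8192
+#     DATA
+#     4096
+#     <4096 bytes of raw data>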
+
+for kind, start, end in make_xfer_instructions(fd):
+ if kind == HOLE:
+ sys.stdout.write('HOLE\n%d\n' % (end - start))
+ elif kind == DATA:
+ sys.stdout.write('DATA\n%d\n' % (end - start))
+ copy_slice_from_file(sys.stdout, fd, start, end)