From a7eaabeec2c6f77d7b326b23b3cee4bad21b3eb8 Mon Sep 17 00:00:00 2001 From: Adam Coldrick Date: Mon, 1 Jun 2015 10:38:45 +0000 Subject: Put all the extensions in a subdirectory --- VERSION | 2 +- busybox-init.configure | 145 ---- ceph.configure | 266 -------- cloud-init.configure | 63 -- clusters/image-package-example.morph | 2 +- distbuild.configure | 132 ---- extensions/busybox-init.configure | 145 ++++ extensions/ceph.configure | 266 ++++++++ extensions/cloud-init.configure | 63 ++ extensions/distbuild.configure | 132 ++++ extensions/hosts | 1 + extensions/image-package-example/README | 9 + extensions/image-package-example/common.sh.in | 72 ++ .../image-package-example/disk-install.sh.in | 51 ++ .../image-package-example/make-disk-image.sh.in | 36 + extensions/image-package.write | 168 +++++ extensions/installer.configure | 48 ++ extensions/jffs2.write | 64 ++ extensions/jffs2.write.help | 28 + extensions/mason.configure | 153 +++++ extensions/moonshot-kernel.configure | 33 + extensions/nfsboot-server.configure | 58 ++ extensions/openstack-ceilometer.configure | 120 ++++ extensions/openstack-cinder.configure | 125 ++++ extensions/openstack-glance.configure | 101 +++ extensions/openstack-ironic.configure | 155 +++++ extensions/openstack-keystone.configure | 123 ++++ extensions/openstack-network.configure | 50 ++ extensions/openstack-neutron.configure | 138 ++++ extensions/openstack-nova.configure | 168 +++++ extensions/openstack-swift-controller.configure | 49 ++ extensions/pxeboot.check | 86 +++ extensions/pxeboot.write | 755 +++++++++++++++++++++ extensions/pxeboot.write.help | 166 +++++ extensions/sdk.write | 284 ++++++++ extensions/strip-gplv3.configure | 101 +++ extensions/swift-build-rings.yml | 34 + extensions/swift-storage-devices-validate.py | 60 ++ extensions/swift-storage.configure | 107 +++ extensions/trove.configure | 148 ++++ extensions/trove.configure.help | 126 ++++ extensions/vagrant.configure | 55 ++ hosts | 1 - image-package-example/README | 9 - image-package-example/common.sh.in | 72 -- image-package-example/disk-install.sh.in | 51 -- image-package-example/make-disk-image.sh.in | 36 - image-package.write | 168 ----- installer.configure | 48 -- jffs2.write | 64 -- jffs2.write.help | 28 - mason.configure | 153 ----- moonshot-kernel.configure | 33 - nfsboot-server.configure | 58 -- openstack-ceilometer.configure | 120 ---- openstack-cinder.configure | 125 ---- openstack-glance.configure | 101 --- openstack-ironic.configure | 155 ----- openstack-keystone.configure | 123 ---- openstack-network.configure | 50 -- openstack-neutron.configure | 138 ---- openstack-nova.configure | 168 ----- openstack-swift-controller.configure | 49 -- pxeboot.check | 86 --- pxeboot.write | 755 --------------------- pxeboot.write.help | 166 ----- sdk.write | 284 -------- strip-gplv3.configure | 101 --- swift-build-rings.yml | 34 - swift-storage-devices-validate.py | 60 -- swift-storage.configure | 107 --- trove.configure | 148 ---- trove.configure.help | 126 ---- vagrant.configure | 55 -- 74 files changed, 4280 insertions(+), 4280 deletions(-) delete mode 100644 busybox-init.configure delete mode 100644 ceph.configure delete mode 100755 cloud-init.configure delete mode 100644 distbuild.configure create mode 100644 extensions/busybox-init.configure create mode 100644 extensions/ceph.configure create mode 100755 extensions/cloud-init.configure create mode 100644 extensions/distbuild.configure create mode 100644 extensions/hosts create mode 100644 extensions/image-package-example/README create 
mode 100644 extensions/image-package-example/common.sh.in create mode 100644 extensions/image-package-example/disk-install.sh.in create mode 100644 extensions/image-package-example/make-disk-image.sh.in create mode 100755 extensions/image-package.write create mode 100755 extensions/installer.configure create mode 100644 extensions/jffs2.write create mode 100644 extensions/jffs2.write.help create mode 100644 extensions/mason.configure create mode 100644 extensions/moonshot-kernel.configure create mode 100755 extensions/nfsboot-server.configure create mode 100644 extensions/openstack-ceilometer.configure create mode 100644 extensions/openstack-cinder.configure create mode 100644 extensions/openstack-glance.configure create mode 100644 extensions/openstack-ironic.configure create mode 100644 extensions/openstack-keystone.configure create mode 100644 extensions/openstack-network.configure create mode 100644 extensions/openstack-neutron.configure create mode 100644 extensions/openstack-nova.configure create mode 100644 extensions/openstack-swift-controller.configure create mode 100755 extensions/pxeboot.check create mode 100644 extensions/pxeboot.write create mode 100644 extensions/pxeboot.write.help create mode 100755 extensions/sdk.write create mode 100755 extensions/strip-gplv3.configure create mode 100644 extensions/swift-build-rings.yml create mode 100755 extensions/swift-storage-devices-validate.py create mode 100644 extensions/swift-storage.configure create mode 100755 extensions/trove.configure create mode 100644 extensions/trove.configure.help create mode 100644 extensions/vagrant.configure delete mode 100644 hosts delete mode 100644 image-package-example/README delete mode 100644 image-package-example/common.sh.in delete mode 100644 image-package-example/disk-install.sh.in delete mode 100644 image-package-example/make-disk-image.sh.in delete mode 100755 image-package.write delete mode 100755 installer.configure delete mode 100644 jffs2.write delete mode 100644 jffs2.write.help delete mode 100644 mason.configure delete mode 100644 moonshot-kernel.configure delete mode 100755 nfsboot-server.configure delete mode 100644 openstack-ceilometer.configure delete mode 100644 openstack-cinder.configure delete mode 100644 openstack-glance.configure delete mode 100644 openstack-ironic.configure delete mode 100644 openstack-keystone.configure delete mode 100644 openstack-network.configure delete mode 100644 openstack-neutron.configure delete mode 100644 openstack-nova.configure delete mode 100644 openstack-swift-controller.configure delete mode 100755 pxeboot.check delete mode 100644 pxeboot.write delete mode 100644 pxeboot.write.help delete mode 100755 sdk.write delete mode 100755 strip-gplv3.configure delete mode 100644 swift-build-rings.yml delete mode 100755 swift-storage-devices-validate.py delete mode 100644 swift-storage.configure delete mode 100755 trove.configure delete mode 100644 trove.configure.help delete mode 100644 vagrant.configure diff --git a/VERSION b/VERSION index 0a70affa..e9b5f147 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -version: 3 +version: 4 diff --git a/busybox-init.configure b/busybox-init.configure deleted file mode 100644 index c7dba3b9..00000000 --- a/busybox-init.configure +++ /dev/null @@ -1,145 +0,0 @@ -#!/bin/sh -# -# Copyright (C) 2014 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# This is a "morph deploy" configuration extension to configure a system -# to use busybox for its init, if INIT_SYSTEM=busybox is specified. -# -# As well as checking INIT_SYSTEM, the following variables are used. -# -# Getty configuration: -# * CONSOLE_DEVICE: Which device to spawn a getty on (default: ttyS0) -# * CONSOLE_BAUDRATE: Baud rate of the console (default: 115200) -# * CONSOLE_MODE: What kind of terminal this console emulates -# (default: vt100) - -if [ "$INIT_SYSTEM" != busybox ]; then - echo Not configuring system to use busybox init. - exit 0 -fi - -set -e -echo Configuring system to use busybox init - -RUN_SCRIPT=/etc/rcS -INIT_SCRIPT=/sbin/init - -install_mdev_config(){ - install -D -m644 /dev/stdin "$1" <<'EOF' -# support module loading on hotplug -$MODALIAS=.* root:root 660 @modprobe "$MODALIAS" - -# null may already exist; therefore ownership has to be changed with command -null root:root 666 @chmod 666 $MDEV -zero root:root 666 -full root:root 666 -random root:root 444 -urandom root:root 444 -hwrandom root:root 444 -grsec root:root 660 - -kmem root:root 640 -mem root:root 640 -port root:root 640 -# console may already exist; therefore ownership has to be changed with command -console root:root 600 @chmod 600 $MDEV -ptmx root:root 666 -pty.* root:root 660 - -# Typical devices - -tty root:root 666 -tty[0-9]* root:root 660 -vcsa*[0-9]* root:root 660 -ttyS[0-9]* root:root 660 - -# block devices -ram[0-9]* root:root 660 -loop[0-9]+ root:root 660 -sd[a-z].* root:root 660 -hd[a-z][0-9]* root:root 660 -md[0-9]* root:root 660 -sr[0-9]* root:root 660 @ln -sf $MDEV cdrom -fd[0-9]* root:root 660 - -# net devices -SUBSYSTEM=net;.* root:root 600 @nameif -tun[0-9]* root:root 600 =net/ -tap[0-9]* root:root 600 =net/ -EOF -} - -install_start_script(){ - install -D -m755 /dev/stdin "$1" <<'EOF' -#!/bin/sh -mount -t devtmpfs devtmpfs /dev -mount -t proc proc /proc -mount -t sysfs sysfs /sys -mkdir -p /dev/pts -mount -t devpts devpts /dev/pts - -echo /sbin/mdev >/proc/sys/kernel/hotplug -mdev -s - -hostname -F /etc/hostname - -run-parts -a start /etc/init.d -EOF -} - -install_inittab(){ - local inittab="$1" - local dev="$2" - local baud="$3" - local mode="$4" - install -D -m644 /dev/stdin "$1" <&2 - exit 1 -} - -install_mdev_config "$1/etc/mdev.conf" - -install_start_script "$1$RUN_SCRIPT" - -install_inittab "$1/etc/inittab" "${CONSOLE_DEV-ttyS0}" \ - "${CONSOLE_BAUD-115200}" "${CONSOLE_MODE-vt100}" - -install_init_symlink "$1$INIT_SCRIPT" diff --git a/ceph.configure b/ceph.configure deleted file mode 100644 index c3cd92d1..00000000 --- a/ceph.configure +++ /dev/null @@ -1,266 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2013 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License.5 -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR 
PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -import cliapp -import sys -import os -import subprocess -import shutil -import re -import stat - -systemd_monitor_template = """ -[Unit] -Description=Ceph Monitor firstboot setup -After=network-online.target - -[Service] -ExecStart=/bin/bash -c "/root/setup-ceph-head | tee /root/monitor-setuplog" -ExecStartPost=/bin/rm /etc/systemd/system/multi-user.target.wants/ceph-monitor-fboot.service - -[Install] -Wanted-By=multi-user.target -""" - -systemd_monitor_fname_template = "ceph-monitor-fboot.service" - -systemd_osd_template = """ -[Unit] -Description=Ceph osd firstboot setup -After=network-online.target - -[Service] -ExecStart=/bin/bash -c "/root/setup-ceph-node | tee /root/storage-setuplog" -ExecStartPost=/bin/rm /etc/systemd/system/multi-user.target.wants/ceph-storage-fboot.service - -[Install] -Wanted-By=multi-user.target -""" -systemd_osd_fname_template = "ceph-storage-fboot.service" - -ceph_monitor_config_template = """#!/bin/bash -ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *' -ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow' -ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring -monmaptool --create --add 0 10.0.100.2 --fsid 9ceb9257-7541-4de4-b34b-586079986700 /tmp/monmap -mkdir /var/lib/ceph/mon/ceph-0 -ceph-mon --mkfs -i 0 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring -/etc/init.d/ceph start mon.0 -touch ~/monitor-configured -""" - -ceph_storage_config_template = """#!/bin/bash -scp 10.0.100.2:/var/lib/ceph/bootstrap-osd/ceph.keyring /var/lib/ceph/bootstrap-osd/ -echo -e "n\np\n1\n\n\nw\n" | fdisk /dev/sdb -ceph-disk prepare --cluster ceph --cluster-uuid 9ceb9257-7541-4de4-b34b-586079986700 --fs-type ext4 /dev/sdb1 -sudo ceph-disk activate /dev/sdb1 -/etc/init.d/ceph start osd.0 -touch ~/storage-configured -""" - -executable_file_permissions = stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR | \ - stat.S_IXGRP | stat.S_IRGRP | \ - stat.S_IXOTH | stat.S_IROTH - -class CephConfigurationExtension(cliapp.Application): - """ - Set up ceph server daemons. - - Must include the following environment variables: - - HOSTNAME - Must be defined it is used as the ID for - the monitor and metadata daemons. - CEPH_CONF - Provide a ceph configuration file. - - Optional environment variables: - - CEPH_CLUSTER - Cluster name, if not provided defaults to 'ceph'. - - CEPH_BOOTSTRAP_OSD - Registered key capable of generating OSD - keys. - CEPH_BOOTSTRAP_MDS - Registered key capable of generating MDS - keys. - - Bootstrap keys are required for creating OSD daemons on servers - that do not have a running monitor daemon. They are gathered - by 'ceph-deploy gatherkeys' but can be generated and registered - separately. - - CEPH_MON - (Blank) Create a ceph monitor daemon on the image. - CEPH_MON_KEYRING - Location of monitor keyring. Required by the - monitor if using cephx authentication. - - CEPH_OSD_X_DATA_DIR - Location of data directory for OSD. - Create an OSD daemon on image. 'X' is an integer - id, many osd daemons may be run on same server. - - CEPH_MDS - (Blank) Create a metadata server daemon on server. 
- """ - - def process_args(self, args): - - if "HOSTNAME" not in os.environ: - print "ERROR: Need a hostname defined by 'HOSTNAME'" - sys.exit(1) - if "CEPH_CLUSTER" not in os.environ: - print "ERROR: Need a cluster name defined by 'CEPH_CLUSTER'" - sys.exit(1) - if "CEPH_CONF" not in os.environ: - print "ERROR: Need a ceph conf file defined by 'CEPH_CONF'" - sys.exit(1) - - self.dest_dir = args[0] - - self.cluster_name = os.environ["CEPH_CLUSTER"] - self.hostname = os.environ["HOSTNAME"] - - self.conf_file = "/etc/ceph/{}.conf".format(self.cluster_name) - self.mon_dir = "/var/lib/ceph/mon/" - self.osd_dir = "/var/lib/ceph/osd/" - self.mds_dir = "/var/lib/ceph/mds/" - self.tmp_dir = "/var/lib/ceph/tmp/" - self.bootstrap_mds_dir = "/var/lib/ceph/bootstrap-mds/" - self.bootstrap_osd_dir = "/var/lib/ceph/bootstrap-osd/" - self.systemd_dir = "/etc/systemd/system/" - self.systemd_multiuser_dir = "/etc/systemd/system/multi-user.target.wants/" - - self.copy_to_img(os.environ["CEPH_CONF"], self.conf_file) - - # Copy over bootstrap keyrings - if "CEPH_BOOTSTRAP_OSD" in os.environ: - self.copy_bootstrap_osd(os.environ["CEPH_BOOTSTRAP_OSD"]); - if "CEPH_BOOTSTRAP_MDS" in os.environ: - self.copy_bootstrap_mds(os.environ["CEPH_BOOTSTRAP_MDS"]); - - # Configure any monitor daemons - if "CEPH_MON" in os.environ: - self.create_mon_data_dir(os.environ.get("CEPH_MON_KEYRING")) - else: - self.create_osd_startup_script("None", "None") - - # Configure any object storage daemons - osd_re = r"CEPH_OSD_(\d+)_DATA_DIR$" - - for env in os.environ.keys(): - match = re.match(osd_re, env) - if match: - osd_data_dir_env = match.group(0) - osd_id = match.group(1) - - self.create_osd_data_dir(osd_id, os.environ.get(osd_data_dir_env)) - - # Configure any mds daemons - if "CEPH_MDS" in os.environ: - self.create_mds_data_dir() - - # Create a fake 'partprobe' - fake_partprobe_filename = self.dest_dir + "/sbin/partprobe" - fake_partprobe = open(fake_partprobe_filename, 'w') - fake_partprobe.write("#!/bin/bash\nexit 0;\n") - fake_partprobe.close() - os.chmod(fake_partprobe_filename, executable_file_permissions) - self.create_startup_scripts() - - def copy_to_img(self, src_file, dest_file): - shutil.copy(src_file, self.dest_dir + dest_file) - - def copy_bootstrap_osd(self, src_file): - self.copy_to_img(src_file, - os.path.join(self.bootstrap_osd_dir, "{}.keyring".format(self.cluster_name))) - - def copy_bootstrap_mds(self, src_file): - self.copy_to_img(src_file, - os.path.join(self.bootstrap_mds_dir, "{}.keyring".format(self.cluster_name))) - - def symlink_to_multiuser(self, fname): - print >> sys.stderr, os.path.join("../", fname) - print >> sys.stderr, self.dest_dir + os.path.join(self.systemd_multiuser_dir, fname) - os.symlink(os.path.join("../", fname), - self.dest_dir + os.path.join(self.systemd_multiuser_dir, fname)) - - def create_mon_data_dir(self, src_keyring): - - #Create the monitor data directory - mon_data_dir = os.path.join(self.mon_dir, "{}-{}".format(self.cluster_name, self.hostname)) - os.makedirs(self.dest_dir + mon_data_dir) - - #Create sysvinit file to start via sysvinit - sysvinit_file = os.path.join(mon_data_dir, "sysvinit") - open(self.dest_dir + sysvinit_file, 'a').close() - - #Create systemd file to initialize the monitor data directory - keyring = "" - if src_keyring: - #Copy the keyring from local to the image - dest_keyring = os.path.join(self.tmp_dir, - "{}-{}.mon.keyring".format(self.cluster_name, self.hostname)) - self.copy_to_img(src_keyring, dest_keyring) - keyring = "--keyring " + dest_keyring 
- - mon_systemd_fname = systemd_monitor_fname_template - systemd_script_name = self.dest_dir + os.path.join(self.systemd_dir, mon_systemd_fname) - mon_systemd = open(systemd_script_name, 'w') - mon_systemd.write(systemd_monitor_template) - mon_systemd.close() - #Create a symlink to the multi user target - self.symlink_to_multiuser(mon_systemd_fname) - - def create_osd_data_dir(self, osd_id, data_dir): - if not data_dir: - data_dir = '/srv/osd' + osd_id - - #Create the osd data dir - os.makedirs(self.dest_dir + data_dir) - - def create_osd_startup_script(self, osd_id, data_dir): - osd_systemd_fname = systemd_osd_fname_template - osd_full_name = self.dest_dir + os.path.join(self.systemd_dir, osd_systemd_fname) - - osd_systemd = open(osd_full_name, 'w') - - osd_systemd.write(systemd_osd_template) - osd_systemd.close() - - #Create a symlink to the multi user target - self.symlink_to_multiuser(osd_systemd_fname) - - def create_mds_data_dir(self): - - #Create the monitor data directory - mds_data_dir = os.path.join(self.mds_dir, "{}-{}".format(self.cluster_name, self.hostname)) - os.makedirs(self.dest_dir + mds_data_dir) - - #Create sysvinit file to start via sysvinit - sysvinit_file = os.path.join(mds_data_dir, "sysvinit") - open(self.dest_dir + sysvinit_file, 'a').close() - - - def create_startup_scripts(self): - head_setup_file = os.path.join(self.dest_dir, "root", "setup-ceph-head") - - ceph_head_setup = open(head_setup_file, "w") - ceph_head_setup.write(ceph_monitor_config_template) - ceph_head_setup.close() - os.chmod(head_setup_file, executable_file_permissions) - - osd_setup_file = os.path.join(self.dest_dir, "root", "setup-ceph-node") - ceph_node_setup = open(osd_setup_file, "w") - ceph_node_setup.write(ceph_storage_config_template) - ceph_node_setup.close() - os.chmod(osd_setup_file, executable_file_permissions) - - -CephConfigurationExtension().run() diff --git a/cloud-init.configure b/cloud-init.configure deleted file mode 100755 index aa83e0e2..00000000 --- a/cloud-init.configure +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/sh -# -# Copyright (C) 2014 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# -# This is a "morph deploy" configuration extension to enable the -# cloud-init services. -set -e - -ROOT="$1" - -########################################################################## - -set -e - -case "$CLOUD_INIT" in -''|False|no) - exit 0 - ;; -True|yes) - echo "Configuring cloud-init" - ;; -*) - echo Unrecognised option "$CLOUD_INIT" to CLOUD_INIT - exit 1 - ;; -esac - - -cloud_init_services="cloud-config.service - cloud-init-local.service - cloud-init.service - cloud-final.service" - -# Iterate over the cloud-init services and enable them creating a link -# into /etc/systemd/system/multi-user.target.wants. -# If the services to link are not present, fail. - -services_folder="lib/systemd/system" -for service_name in $cloud_init_services; do - if [ ! 
-f "$ROOT/$services_folder/$service_name" ]; then - echo "ERROR: Service $service_name is missing." >&2 - echo "Failed to configure cloud-init." - exit 1 - else - echo Enabling systemd service "$service_name" >"$MORPH_LOG_FD" - ln -sf "/$services_folder/$service_name" \ - "$ROOT/etc/systemd/system/multi-user.target.wants/$service_name" - fi -done diff --git a/clusters/image-package-example.morph b/clusters/image-package-example.morph index fd8487e2..6472b456 100644 --- a/clusters/image-package-example.morph +++ b/clusters/image-package-example.morph @@ -9,4 +9,4 @@ systems: type: image-package location: image-package-example.tar BOOTLOADER_BLOBS: /usr/share/syslinux/mbr.bin - INCLUDE_SCRIPTS: image-package-example/make-disk-image.sh.in:image-package-example/disk-install.sh.in:image-package-example/common.sh.in + INCLUDE_SCRIPTS: extensions/image-package-example/make-disk-image.sh.in:extensions/image-package-example/disk-install.sh.in:extensions/image-package-example/common.sh.in diff --git a/distbuild.configure b/distbuild.configure deleted file mode 100644 index 062aaecc..00000000 --- a/distbuild.configure +++ /dev/null @@ -1,132 +0,0 @@ -#!/bin/sh -# Copyright (C) 2013-2014 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# This is a "morph deploy" configure extension to configure a Baserock -# build node, as part of a distributed building cluster. It uses the -# following variables from the environment: -# -# * DISTBUILD_CONTROLLER: if 'yes', machine is set up as the controller. -# * DISTBUILD_WORKER: if 'yes', machine is set up as a worker. -# * TROVE_ID: hostname and Trove prefix of the server to pull source -# from and push built artifacts to. -# * TROVE_HOST: FQDN of the same server as in TROVE_ID -# -# The following variable is optional: -# -# * ARTIFACT_CACHE_SERVER: by default artifacts are pushed to the same -# Trove that served the source, but you can use a different one. -# -# The following variable is required for worker nodes only: -# -# * CONTROLLERHOST: hostname or IP address of distbuild controller machine. -# * WORKER_SSH_KEY: identity used to authenticate with Trove -# -# The following variable is required for the controller node only: -# -# * WORKERS: hostnames or IP address of worker nodes, comma-separated. - -set -e - -if [ -n "$DISTBUILD_GENERIC" ]; then - echo "Not configuring the distbuild node, it will be generic" - exit 0 -fi - -# Set default values for these two options if they are unset, so that if the -# user specifies no distbuild config at all the configure extension exits -# without doing anything but does not raise an error. 
-DISTBUILD_CONTROLLER=${DISTBUILD_CONTROLLER-False} -DISTBUILD_WORKER=${DISTBUILD_WORKER-False} - -if [ "$DISTBUILD_CONTROLLER" = False -a "$DISTBUILD_WORKER" = False ]; then - exit 0 -fi - -set -u - -# Check that all the variables needed are present: - -error_vars=false - -if [ "x$TROVE_HOST" = "x" ]; then - echo "ERROR: TROVE_HOST needs to be defined." - error_vars=true -fi - -if [ "x$TROVE_ID" = "x" ]; then - echo "ERROR: TROVE_ID needs to be defined." - error_vars=true -fi - -if [ "$DISTBUILD_WORKER" = True ]; then - if ! ssh-keygen -lf "$WORKER_SSH_KEY" > /dev/null 2>&1; then - echo "ERROR: WORKER_SSH_KEY is not a vaild ssh key." - error_vars=true - fi - - if [ "x$CONTROLLERHOST" = "x" ]; then - echo "ERROR: CONTROLLERHOST needs to be defined." - error_vars=true - fi -fi - -if [ "$DISTBUILD_CONTROLLER" = True ]; then - if [ "x$WORKERS" = "x" ]; then - echo "ERROR: WORKERS needs to be defined." - error_vars=true - fi -fi - -if "$error_vars"; then - exit 1 -fi - - -ROOT="$1" - -DISTBUILD_DATA="$ROOT/etc/distbuild" -mkdir -p "$DISTBUILD_DATA" - -# If it's a worker, install the worker ssh key. -if [ "$DISTBUILD_WORKER" = True ] -then - install -m 0644 "$WORKER_SSH_KEY" "$DISTBUILD_DATA/worker.key" -fi - - - -# Create the configuration file -python <<'EOF' >"$DISTBUILD_DATA/distbuild.conf" -import os, sys, yaml - -distbuild_configuration={ - 'TROVE_ID': os.environ['TROVE_ID'], - 'TROVE_HOST': os.environ['TROVE_HOST'], - 'DISTBUILD_WORKER': os.environ['DISTBUILD_WORKER'], - 'DISTBUILD_CONTROLLER': os.environ['DISTBUILD_CONTROLLER'], - 'WORKER_SSH_KEY': '/etc/distbuild/worker.key', -} - - -optional_keys = ('ARTIFACT_CACHE_SERVER', 'CONTROLLERHOST', 'WORKERS', - 'TROVE_BACKUP_KEYS') - -for key in optional_keys: - if key in os.environ: - distbuild_configuration[key] = os.environ[key] - -yaml.dump(distbuild_configuration, sys.stdout, default_flow_style=False) -EOF diff --git a/extensions/busybox-init.configure b/extensions/busybox-init.configure new file mode 100644 index 00000000..c7dba3b9 --- /dev/null +++ b/extensions/busybox-init.configure @@ -0,0 +1,145 @@ +#!/bin/sh +# +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# This is a "morph deploy" configuration extension to configure a system +# to use busybox for its init, if INIT_SYSTEM=busybox is specified. +# +# As well as checking INIT_SYSTEM, the following variables are used. +# +# Getty configuration: +# * CONSOLE_DEVICE: Which device to spawn a getty on (default: ttyS0) +# * CONSOLE_BAUDRATE: Baud rate of the console (default: 115200) +# * CONSOLE_MODE: What kind of terminal this console emulates +# (default: vt100) + +if [ "$INIT_SYSTEM" != busybox ]; then + echo Not configuring system to use busybox init. 
+ exit 0 +fi + +set -e +echo Configuring system to use busybox init + +RUN_SCRIPT=/etc/rcS +INIT_SCRIPT=/sbin/init + +install_mdev_config(){ + install -D -m644 /dev/stdin "$1" <<'EOF' +# support module loading on hotplug +$MODALIAS=.* root:root 660 @modprobe "$MODALIAS" + +# null may already exist; therefore ownership has to be changed with command +null root:root 666 @chmod 666 $MDEV +zero root:root 666 +full root:root 666 +random root:root 444 +urandom root:root 444 +hwrandom root:root 444 +grsec root:root 660 + +kmem root:root 640 +mem root:root 640 +port root:root 640 +# console may already exist; therefore ownership has to be changed with command +console root:root 600 @chmod 600 $MDEV +ptmx root:root 666 +pty.* root:root 660 + +# Typical devices + +tty root:root 666 +tty[0-9]* root:root 660 +vcsa*[0-9]* root:root 660 +ttyS[0-9]* root:root 660 + +# block devices +ram[0-9]* root:root 660 +loop[0-9]+ root:root 660 +sd[a-z].* root:root 660 +hd[a-z][0-9]* root:root 660 +md[0-9]* root:root 660 +sr[0-9]* root:root 660 @ln -sf $MDEV cdrom +fd[0-9]* root:root 660 + +# net devices +SUBSYSTEM=net;.* root:root 600 @nameif +tun[0-9]* root:root 600 =net/ +tap[0-9]* root:root 600 =net/ +EOF +} + +install_start_script(){ + install -D -m755 /dev/stdin "$1" <<'EOF' +#!/bin/sh +mount -t devtmpfs devtmpfs /dev +mount -t proc proc /proc +mount -t sysfs sysfs /sys +mkdir -p /dev/pts +mount -t devpts devpts /dev/pts + +echo /sbin/mdev >/proc/sys/kernel/hotplug +mdev -s + +hostname -F /etc/hostname + +run-parts -a start /etc/init.d +EOF +} + +install_inittab(){ + local inittab="$1" + local dev="$2" + local baud="$3" + local mode="$4" + install -D -m644 /dev/stdin "$1" <&2 + exit 1 +} + +install_mdev_config "$1/etc/mdev.conf" + +install_start_script "$1$RUN_SCRIPT" + +install_inittab "$1/etc/inittab" "${CONSOLE_DEV-ttyS0}" \ + "${CONSOLE_BAUD-115200}" "${CONSOLE_MODE-vt100}" + +install_init_symlink "$1$INIT_SCRIPT" diff --git a/extensions/ceph.configure b/extensions/ceph.configure new file mode 100644 index 00000000..c3cd92d1 --- /dev/null +++ b/extensions/ceph.configure @@ -0,0 +1,266 @@ +#!/usr/bin/python +# Copyright (C) 2013 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License.5 +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +import cliapp +import sys +import os +import subprocess +import shutil +import re +import stat + +systemd_monitor_template = """ +[Unit] +Description=Ceph Monitor firstboot setup +After=network-online.target + +[Service] +ExecStart=/bin/bash -c "/root/setup-ceph-head | tee /root/monitor-setuplog" +ExecStartPost=/bin/rm /etc/systemd/system/multi-user.target.wants/ceph-monitor-fboot.service + +[Install] +Wanted-By=multi-user.target +""" + +systemd_monitor_fname_template = "ceph-monitor-fboot.service" + +systemd_osd_template = """ +[Unit] +Description=Ceph osd firstboot setup +After=network-online.target + +[Service] +ExecStart=/bin/bash -c "/root/setup-ceph-node | tee /root/storage-setuplog" +ExecStartPost=/bin/rm /etc/systemd/system/multi-user.target.wants/ceph-storage-fboot.service + +[Install] +Wanted-By=multi-user.target +""" +systemd_osd_fname_template = "ceph-storage-fboot.service" + +ceph_monitor_config_template = """#!/bin/bash +ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *' +ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow' +ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring +monmaptool --create --add 0 10.0.100.2 --fsid 9ceb9257-7541-4de4-b34b-586079986700 /tmp/monmap +mkdir /var/lib/ceph/mon/ceph-0 +ceph-mon --mkfs -i 0 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring +/etc/init.d/ceph start mon.0 +touch ~/monitor-configured +""" + +ceph_storage_config_template = """#!/bin/bash +scp 10.0.100.2:/var/lib/ceph/bootstrap-osd/ceph.keyring /var/lib/ceph/bootstrap-osd/ +echo -e "n\np\n1\n\n\nw\n" | fdisk /dev/sdb +ceph-disk prepare --cluster ceph --cluster-uuid 9ceb9257-7541-4de4-b34b-586079986700 --fs-type ext4 /dev/sdb1 +sudo ceph-disk activate /dev/sdb1 +/etc/init.d/ceph start osd.0 +touch ~/storage-configured +""" + +executable_file_permissions = stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR | \ + stat.S_IXGRP | stat.S_IRGRP | \ + stat.S_IXOTH | stat.S_IROTH + +class CephConfigurationExtension(cliapp.Application): + """ + Set up ceph server daemons. + + Must include the following environment variables: + + HOSTNAME - Must be defined it is used as the ID for + the monitor and metadata daemons. + CEPH_CONF - Provide a ceph configuration file. + + Optional environment variables: + + CEPH_CLUSTER - Cluster name, if not provided defaults to 'ceph'. + + CEPH_BOOTSTRAP_OSD - Registered key capable of generating OSD + keys. + CEPH_BOOTSTRAP_MDS - Registered key capable of generating MDS + keys. + + Bootstrap keys are required for creating OSD daemons on servers + that do not have a running monitor daemon. They are gathered + by 'ceph-deploy gatherkeys' but can be generated and registered + separately. + + CEPH_MON - (Blank) Create a ceph monitor daemon on the image. + CEPH_MON_KEYRING - Location of monitor keyring. Required by the + monitor if using cephx authentication. + + CEPH_OSD_X_DATA_DIR - Location of data directory for OSD. + Create an OSD daemon on image. 'X' is an integer + id, many osd daemons may be run on same server. + + CEPH_MDS - (Blank) Create a metadata server daemon on server. 
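# For illustration only: the environment a monitor-plus-OSD deployment might
# hand to this extension, following the variable list above. The hostname
# and file paths are placeholders. Note that although CEPH_CLUSTER is
# described above as optional with a default of 'ceph', process_args()
# below exits if it is not set.
HOSTNAME=ceph-mon-0
CEPH_CLUSTER=ceph
CEPH_CONF=ceph.conf
CEPH_MON=
CEPH_MON_KEYRING=ceph.mon.keyring
CEPH_OSD_0_DATA_DIR=/srv/osd0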
+ """ + + def process_args(self, args): + + if "HOSTNAME" not in os.environ: + print "ERROR: Need a hostname defined by 'HOSTNAME'" + sys.exit(1) + if "CEPH_CLUSTER" not in os.environ: + print "ERROR: Need a cluster name defined by 'CEPH_CLUSTER'" + sys.exit(1) + if "CEPH_CONF" not in os.environ: + print "ERROR: Need a ceph conf file defined by 'CEPH_CONF'" + sys.exit(1) + + self.dest_dir = args[0] + + self.cluster_name = os.environ["CEPH_CLUSTER"] + self.hostname = os.environ["HOSTNAME"] + + self.conf_file = "/etc/ceph/{}.conf".format(self.cluster_name) + self.mon_dir = "/var/lib/ceph/mon/" + self.osd_dir = "/var/lib/ceph/osd/" + self.mds_dir = "/var/lib/ceph/mds/" + self.tmp_dir = "/var/lib/ceph/tmp/" + self.bootstrap_mds_dir = "/var/lib/ceph/bootstrap-mds/" + self.bootstrap_osd_dir = "/var/lib/ceph/bootstrap-osd/" + self.systemd_dir = "/etc/systemd/system/" + self.systemd_multiuser_dir = "/etc/systemd/system/multi-user.target.wants/" + + self.copy_to_img(os.environ["CEPH_CONF"], self.conf_file) + + # Copy over bootstrap keyrings + if "CEPH_BOOTSTRAP_OSD" in os.environ: + self.copy_bootstrap_osd(os.environ["CEPH_BOOTSTRAP_OSD"]); + if "CEPH_BOOTSTRAP_MDS" in os.environ: + self.copy_bootstrap_mds(os.environ["CEPH_BOOTSTRAP_MDS"]); + + # Configure any monitor daemons + if "CEPH_MON" in os.environ: + self.create_mon_data_dir(os.environ.get("CEPH_MON_KEYRING")) + else: + self.create_osd_startup_script("None", "None") + + # Configure any object storage daemons + osd_re = r"CEPH_OSD_(\d+)_DATA_DIR$" + + for env in os.environ.keys(): + match = re.match(osd_re, env) + if match: + osd_data_dir_env = match.group(0) + osd_id = match.group(1) + + self.create_osd_data_dir(osd_id, os.environ.get(osd_data_dir_env)) + + # Configure any mds daemons + if "CEPH_MDS" in os.environ: + self.create_mds_data_dir() + + # Create a fake 'partprobe' + fake_partprobe_filename = self.dest_dir + "/sbin/partprobe" + fake_partprobe = open(fake_partprobe_filename, 'w') + fake_partprobe.write("#!/bin/bash\nexit 0;\n") + fake_partprobe.close() + os.chmod(fake_partprobe_filename, executable_file_permissions) + self.create_startup_scripts() + + def copy_to_img(self, src_file, dest_file): + shutil.copy(src_file, self.dest_dir + dest_file) + + def copy_bootstrap_osd(self, src_file): + self.copy_to_img(src_file, + os.path.join(self.bootstrap_osd_dir, "{}.keyring".format(self.cluster_name))) + + def copy_bootstrap_mds(self, src_file): + self.copy_to_img(src_file, + os.path.join(self.bootstrap_mds_dir, "{}.keyring".format(self.cluster_name))) + + def symlink_to_multiuser(self, fname): + print >> sys.stderr, os.path.join("../", fname) + print >> sys.stderr, self.dest_dir + os.path.join(self.systemd_multiuser_dir, fname) + os.symlink(os.path.join("../", fname), + self.dest_dir + os.path.join(self.systemd_multiuser_dir, fname)) + + def create_mon_data_dir(self, src_keyring): + + #Create the monitor data directory + mon_data_dir = os.path.join(self.mon_dir, "{}-{}".format(self.cluster_name, self.hostname)) + os.makedirs(self.dest_dir + mon_data_dir) + + #Create sysvinit file to start via sysvinit + sysvinit_file = os.path.join(mon_data_dir, "sysvinit") + open(self.dest_dir + sysvinit_file, 'a').close() + + #Create systemd file to initialize the monitor data directory + keyring = "" + if src_keyring: + #Copy the keyring from local to the image + dest_keyring = os.path.join(self.tmp_dir, + "{}-{}.mon.keyring".format(self.cluster_name, self.hostname)) + self.copy_to_img(src_keyring, dest_keyring) + keyring = "--keyring " + dest_keyring 
+ + mon_systemd_fname = systemd_monitor_fname_template + systemd_script_name = self.dest_dir + os.path.join(self.systemd_dir, mon_systemd_fname) + mon_systemd = open(systemd_script_name, 'w') + mon_systemd.write(systemd_monitor_template) + mon_systemd.close() + #Create a symlink to the multi user target + self.symlink_to_multiuser(mon_systemd_fname) + + def create_osd_data_dir(self, osd_id, data_dir): + if not data_dir: + data_dir = '/srv/osd' + osd_id + + #Create the osd data dir + os.makedirs(self.dest_dir + data_dir) + + def create_osd_startup_script(self, osd_id, data_dir): + osd_systemd_fname = systemd_osd_fname_template + osd_full_name = self.dest_dir + os.path.join(self.systemd_dir, osd_systemd_fname) + + osd_systemd = open(osd_full_name, 'w') + + osd_systemd.write(systemd_osd_template) + osd_systemd.close() + + #Create a symlink to the multi user target + self.symlink_to_multiuser(osd_systemd_fname) + + def create_mds_data_dir(self): + + #Create the monitor data directory + mds_data_dir = os.path.join(self.mds_dir, "{}-{}".format(self.cluster_name, self.hostname)) + os.makedirs(self.dest_dir + mds_data_dir) + + #Create sysvinit file to start via sysvinit + sysvinit_file = os.path.join(mds_data_dir, "sysvinit") + open(self.dest_dir + sysvinit_file, 'a').close() + + + def create_startup_scripts(self): + head_setup_file = os.path.join(self.dest_dir, "root", "setup-ceph-head") + + ceph_head_setup = open(head_setup_file, "w") + ceph_head_setup.write(ceph_monitor_config_template) + ceph_head_setup.close() + os.chmod(head_setup_file, executable_file_permissions) + + osd_setup_file = os.path.join(self.dest_dir, "root", "setup-ceph-node") + ceph_node_setup = open(osd_setup_file, "w") + ceph_node_setup.write(ceph_storage_config_template) + ceph_node_setup.close() + os.chmod(osd_setup_file, executable_file_permissions) + + +CephConfigurationExtension().run() diff --git a/extensions/cloud-init.configure b/extensions/cloud-init.configure new file mode 100755 index 00000000..aa83e0e2 --- /dev/null +++ b/extensions/cloud-init.configure @@ -0,0 +1,63 @@ +#!/bin/sh +# +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# +# This is a "morph deploy" configuration extension to enable the +# cloud-init services. +set -e + +ROOT="$1" + +########################################################################## + +set -e + +case "$CLOUD_INIT" in +''|False|no) + exit 0 + ;; +True|yes) + echo "Configuring cloud-init" + ;; +*) + echo Unrecognised option "$CLOUD_INIT" to CLOUD_INIT + exit 1 + ;; +esac + + +cloud_init_services="cloud-config.service + cloud-init-local.service + cloud-init.service + cloud-final.service" + +# Iterate over the cloud-init services and enable them creating a link +# into /etc/systemd/system/multi-user.target.wants. +# If the services to link are not present, fail. 
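# For illustration only: the single deployment variable this extension
# reads, with accepted values taken from the case statement above ('',
# False or no leave cloud-init disabled; any other value is an error).
CLOUD_INIT=yes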
+ +services_folder="lib/systemd/system" +for service_name in $cloud_init_services; do + if [ ! -f "$ROOT/$services_folder/$service_name" ]; then + echo "ERROR: Service $service_name is missing." >&2 + echo "Failed to configure cloud-init." + exit 1 + else + echo Enabling systemd service "$service_name" >"$MORPH_LOG_FD" + ln -sf "/$services_folder/$service_name" \ + "$ROOT/etc/systemd/system/multi-user.target.wants/$service_name" + fi +done diff --git a/extensions/distbuild.configure b/extensions/distbuild.configure new file mode 100644 index 00000000..062aaecc --- /dev/null +++ b/extensions/distbuild.configure @@ -0,0 +1,132 @@ +#!/bin/sh +# Copyright (C) 2013-2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# This is a "morph deploy" configure extension to configure a Baserock +# build node, as part of a distributed building cluster. It uses the +# following variables from the environment: +# +# * DISTBUILD_CONTROLLER: if 'yes', machine is set up as the controller. +# * DISTBUILD_WORKER: if 'yes', machine is set up as a worker. +# * TROVE_ID: hostname and Trove prefix of the server to pull source +# from and push built artifacts to. +# * TROVE_HOST: FQDN of the same server as in TROVE_ID +# +# The following variable is optional: +# +# * ARTIFACT_CACHE_SERVER: by default artifacts are pushed to the same +# Trove that served the source, but you can use a different one. +# +# The following variable is required for worker nodes only: +# +# * CONTROLLERHOST: hostname or IP address of distbuild controller machine. +# * WORKER_SSH_KEY: identity used to authenticate with Trove +# +# The following variable is required for the controller node only: +# +# * WORKERS: hostnames or IP address of worker nodes, comma-separated. + +set -e + +if [ -n "$DISTBUILD_GENERIC" ]; then + echo "Not configuring the distbuild node, it will be generic" + exit 0 +fi + +# Set default values for these two options if they are unset, so that if the +# user specifies no distbuild config at all the configure extension exits +# without doing anything but does not raise an error. +DISTBUILD_CONTROLLER=${DISTBUILD_CONTROLLER-False} +DISTBUILD_WORKER=${DISTBUILD_WORKER-False} + +if [ "$DISTBUILD_CONTROLLER" = False -a "$DISTBUILD_WORKER" = False ]; then + exit 0 +fi + +set -u + +# Check that all the variables needed are present: + +error_vars=false + +if [ "x$TROVE_HOST" = "x" ]; then + echo "ERROR: TROVE_HOST needs to be defined." + error_vars=true +fi + +if [ "x$TROVE_ID" = "x" ]; then + echo "ERROR: TROVE_ID needs to be defined." + error_vars=true +fi + +if [ "$DISTBUILD_WORKER" = True ]; then + if ! ssh-keygen -lf "$WORKER_SSH_KEY" > /dev/null 2>&1; then + echo "ERROR: WORKER_SSH_KEY is not a vaild ssh key." + error_vars=true + fi + + if [ "x$CONTROLLERHOST" = "x" ]; then + echo "ERROR: CONTROLLERHOST needs to be defined." 
+ error_vars=true + fi +fi + +if [ "$DISTBUILD_CONTROLLER" = True ]; then + if [ "x$WORKERS" = "x" ]; then + echo "ERROR: WORKERS needs to be defined." + error_vars=true + fi +fi + +if "$error_vars"; then + exit 1 +fi + + +ROOT="$1" + +DISTBUILD_DATA="$ROOT/etc/distbuild" +mkdir -p "$DISTBUILD_DATA" + +# If it's a worker, install the worker ssh key. +if [ "$DISTBUILD_WORKER" = True ] +then + install -m 0644 "$WORKER_SSH_KEY" "$DISTBUILD_DATA/worker.key" +fi + + + +# Create the configuration file +python <<'EOF' >"$DISTBUILD_DATA/distbuild.conf" +import os, sys, yaml + +distbuild_configuration={ + 'TROVE_ID': os.environ['TROVE_ID'], + 'TROVE_HOST': os.environ['TROVE_HOST'], + 'DISTBUILD_WORKER': os.environ['DISTBUILD_WORKER'], + 'DISTBUILD_CONTROLLER': os.environ['DISTBUILD_CONTROLLER'], + 'WORKER_SSH_KEY': '/etc/distbuild/worker.key', +} + + +optional_keys = ('ARTIFACT_CACHE_SERVER', 'CONTROLLERHOST', 'WORKERS', + 'TROVE_BACKUP_KEYS') + +for key in optional_keys: + if key in os.environ: + distbuild_configuration[key] = os.environ[key] + +yaml.dump(distbuild_configuration, sys.stdout, default_flow_style=False) +EOF diff --git a/extensions/hosts b/extensions/hosts new file mode 100644 index 00000000..5b97818d --- /dev/null +++ b/extensions/hosts @@ -0,0 +1 @@ +localhost ansible_connection=local diff --git a/extensions/image-package-example/README b/extensions/image-package-example/README new file mode 100644 index 00000000..c1322f25 --- /dev/null +++ b/extensions/image-package-example/README @@ -0,0 +1,9 @@ +Image package example scripts +============================= + +These are scripts used to create disk images or install the system onto +an existing disk. + +This is also implemented independently for the rawdisk.write write +extension; see morphlib.writeexts.WriteExtension.create_local_system() +for a similar, python implementation. diff --git a/extensions/image-package-example/common.sh.in b/extensions/image-package-example/common.sh.in new file mode 100644 index 00000000..9a7389a7 --- /dev/null +++ b/extensions/image-package-example/common.sh.in @@ -0,0 +1,72 @@ +#!/bin/false +# Script library to be used by disk-install.sh and make-disk-image.sh + +status(){ + echo "$@" +} + +info(){ + echo "$@" >&2 +} + +warn(){ + echo "$@" >&2 +} + +extract_rootfs(){ + tar -C "$1" -xf @@ROOTFS_TAR_PATH@@ . +} + +make_disk_image(){ + truncate --size "$1" "$2" +} + +format_disk(){ + local disk="$1" + mkfs.ext4 -F -L rootfs "$disk" +} + +install_fs_config(){ + local mountpoint="$1" + local rootdisk="${2-/dev/vda}" + cat >>"$mountpoint/etc/fstab" <&2 + exit 1 +} + +warn(){ + echo "$@" >&2 +} + +info(){ + echo "$@" >&2 +} + +shellescape(){ + echo "'$(echo "$1" | sed -e "s/'/'\\''/g")'" +} + +sedescape(){ + # Escape the passed in string so it can be safely interpolated into + # a sed expression as a literal value. + echo "$1" | sed -e 's/[\/&]/\\&/g' +} + +ROOTDIR="$1" +OUTPUT_TAR="$2" +td="$(mktemp -d)" +IMAGE_SUBDIR="${IMAGE_SUBDIR-image_files}" +SCRIPT_SUBDIR="${SCRIPT_SUBDIR-tools}" +ROOTFS_TAR="${ROOTFS_TAR-rootfs.tar}" + +# Generate shell snippets that will expand to paths to various resources +# needed by the scripts. +# They expand to a single shell word, so constructs like the following work +# SCRIPT_DIR=@@SCRIPT_DIR@@ +# dd if="$SCRIPT_DIR/mbr" of="$disk" count=1 +# tar -C "$mountpoint" -xf @@ROOTFS_TAR_PATH@@ . 
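# For illustration only: the deployment variables this write extension
# reads, using the values from clusters/image-package-example.morph earlier
# in this patch. IMAGE_SUBDIR, SCRIPT_SUBDIR, ROOTFS_TAR and the *_COMPRESS
# variables are optional and default as in the assignments above
# (image_files, tools, rootfs.tar and cat respectively).
BOOTLOADER_BLOBS=/usr/share/syslinux/mbr.bin
INCLUDE_SCRIPTS=extensions/image-package-example/make-disk-image.sh.in:extensions/image-package-example/disk-install.sh.in:extensions/image-package-example/common.sh.in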
+find_script_dir='"$(readlink -f "$(dirname "$0")")"' +image_dir="$find_script_dir/../$(shellescape "$IMAGE_SUBDIR")" +rootfs_tar_path="$image_dir/$(shellescape "$ROOTFS_TAR")" + +install_script(){ + local source_file="$1" + local output_dir="$2" + local target_file="$output_dir/$SCRIPT_SUBDIR/$(basename "$source_file" .in)" + sed -e "s/@@SCRIPT_DIR@@/$(sedescape "$find_script_dir")/g" \ + -e "s/@@IMAGE_DIR@@/$(sedescape "$image_dir")/g" \ + -e "s/@@ROOTFS_TAR_PATH@@/$(sedescape "$rootfs_tar_path")/g" \ + "$source_file" \ + | install -D -m 755 /proc/self/fd/0 "$target_file" +} + +install_scripts(){ + local output_dir="$1" + ( + IFS="${INCLUDE_SCRIPTS_SEPARATOR-:}" + for script in $INCLUDE_SCRIPTS; do + local script_path="$(pwd)/$script" + if [ ! -e "$script_path" ]; then + warn Script "$script" not found, ignoring + continue + fi + install_script "$script" "$output_dir" + done + ) +} + +install_bootloader_blobs(){ + local output_dir="$1" + local image_dir="$output_dir/$IMAGE_SUBDIR" + ( + IFS="${BOOTLOADER_BLOBS_SEPARATOR-:}" + for blob in $BOOTLOADER_BLOBS; do + local blob_path="$ROOTDIR/$blob" + if [ ! -e "$blob_path" ]; then + warn Bootloader blob "$blob" not found, ignoring + continue + fi + install -D -m644 "$blob_path" "$image_dir/$(basename "$blob_path")" + done + ) +} + +# Determine a basename for our directory as the same as our tarball with +# extensions removed. This is needed, since tarball packages usually +# have a base directory of its contents, rather then extracting into the +# current directory. +output_dir="$(basename "$OUTPUT_TAR")" +for ext in .xz .bz2 .gzip .gz .tgz .tar; do + output_dir="${output_dir%$ext}" +done + +info Installing scripts +install_scripts "$td/$output_dir" + +info Installing bootloader blobs +install_bootloader_blobs "$td/$output_dir" + +info Writing rootfs tar to "$IMAGE_SUBDIR/$ROOTFS_TAR" +tar -C "$ROOTDIR" -c . \ +| sh -c "${ROOTFS_COMPRESS-cat}" >"$td/$output_dir/$IMAGE_SUBDIR/$ROOTFS_TAR" + +info Writing image package tar to "$OUTPUT_TAR" +tar -C "$td" -c "$output_dir" | sh -c "${OUTPUT_COMPRESS-cat}" >"$OUTPUT_TAR" diff --git a/extensions/installer.configure b/extensions/installer.configure new file mode 100755 index 00000000..a77dc851 --- /dev/null +++ b/extensions/installer.configure @@ -0,0 +1,48 @@ +#!/usr/bin/python +# +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# This is a "morph deploy" configuration extension to configure an installer +# system. It will create the configuration needed in the installer system +# to perform an installation. 
It uses the following variables from the +# environment: +# +# * INSTALLER_TARGET_STORAGE_DEVICE +# * INSTALLER_ROOTFS_TO_INSTALL +# * INSTALLER_POST_INSTALL_COMMAND (optional, defaults to `reboot -f`) + +import os +import sys +import yaml + +install_config_file = os.path.join(sys.argv[1], 'etc', 'install.conf') + +try: + installer_configuration = { + 'INSTALLER_TARGET_STORAGE_DEVICE': os.environ['INSTALLER_TARGET_STORAGE_DEVICE'], + 'INSTALLER_ROOTFS_TO_INSTALL': os.environ['INSTALLER_ROOTFS_TO_INSTALL'], + } +except KeyError as e: + print "Not configuring as an installer system" + sys.exit(0) + +postinstkey = 'INSTALLER_POST_INSTALL_COMMAND' +installer_configuration[postinstkey] = os.environ.get(postinstkey, 'reboot -f') + +with open(install_config_file, 'w') as f: + f.write( yaml.dump(installer_configuration, default_flow_style=False) ) + +print "Configuration of the installer system in %s" % install_config_file diff --git a/extensions/jffs2.write b/extensions/jffs2.write new file mode 100644 index 00000000..46b69a53 --- /dev/null +++ b/extensions/jffs2.write @@ -0,0 +1,64 @@ +#!/usr/bin/python +#-*- coding: utf-8 -*- +# Copyright © 2015 Codethink Limited + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + + +'''A Morph deployment write extension for creating images with jffs2 + as the root filesystem.''' + + +import cliapp +import os + +import morphlib.writeexts + + +class Jffs2WriteExtension(morphlib.writeexts.WriteExtension): + + '''See jffs2.write.help for documentation.''' + + def process_args(self, args): + if len(args) != 2: + raise cliapp.AppException('Wrong number of command line args') + + temp_root, location = args + + try: + self.create_jffs2_system(temp_root, location) + self.status(msg='Disk image has been created at %(location)s', + location = location) + except Exception: + self.status(msg='Failure to deploy system to %(location)s', + location = location) + raise + + def create_jffs2_system(self, temp_root, location): + erase_block = self.get_erase_block_size() + cliapp.runcmd( + ['mkfs.jffs2', '--pad', '--no-cleanmarkers', + '--eraseblock='+erase_block, '-d', temp_root, '-o', location]) + + def get_erase_block_size(self): + erase_block = os.environ.get('ERASE_BLOCK', '') + + if erase_block == '': + raise cliapp.AppException('ERASE_BLOCK was not given') + + if not erase_block.isdigit(): + raise cliapp.AppException('ERASE_BLOCK must be a whole number') + + return erase_block + +Jffs2WriteExtension().run() diff --git a/extensions/jffs2.write.help b/extensions/jffs2.write.help new file mode 100644 index 00000000..059a354b --- /dev/null +++ b/extensions/jffs2.write.help @@ -0,0 +1,28 @@ +#-*- coding: utf-8 -*- +# Copyright © 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . + +help: | + + Creates a system produced by Morph build with a jffs2 filesystem and then + writes to an image. To use this extension, the host system must have access + to mkfs.jffs2 which is provided in the mtd-utilities.morph stratum. + + Parameters: + + * location: the pathname of the disk image to be created/upgraded, or the + path to the physical device. + + * ERASE_BLOCK: the erase block size of the target system, which can be + found in '/sys/class/mtd/mtdx/erasesize' diff --git a/extensions/mason.configure b/extensions/mason.configure new file mode 100644 index 00000000..1198ebd0 --- /dev/null +++ b/extensions/mason.configure @@ -0,0 +1,153 @@ +#!/bin/sh +# +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# This is a "morph deploy" configuration extension to fully configure +# a Mason instance at deployment time. 
It uses the following variables +# from the environment: +# +# * ARTIFACT_CACHE_SERVER +# * MASON_CLUSTER_MORPHOLOGY +# * MASON_DEFINITIONS_REF +# * MASON_DISTBUILD_ARCH +# * MASON_TEST_HOST +# * OPENSTACK_NETWORK_ID +# * TEST_INFRASTRUCTURE_TYPE +# * TROVE_HOST +# * TROVE_ID +# * CONTROLLERHOST + +set -e + +########################################################################## +# Copy Mason files into root filesystem +########################################################################## + +ROOT="$1" + +mkdir -p "$ROOT"/usr/lib/mason +cp mason/mason.sh "$ROOT"/usr/lib/mason/mason.sh +cp mason/mason-report.sh "$ROOT"/usr/lib/mason/mason-report.sh +cp mason/os-init-script "$ROOT"/usr/lib/mason/os-init-script + +cp mason/mason.timer "$ROOT"/etc/systemd/system/mason.timer + +cp mason/mason.service "$ROOT"/etc/systemd/system/mason.service + +########################################################################## +# Set up httpd web server +########################################################################## + +cp mason/httpd.service "$ROOT"/etc/systemd/system/httpd.service + +mkdir -p "$ROOT"/srv/mason + +cat >>"$ROOT"/etc/httpd.conf <"$MASON_DATA/mason.conf" +import os, sys, yaml + +mason_configuration={ + 'ARTIFACT_CACHE_SERVER': os.environ['ARTIFACT_CACHE_SERVER'], + 'MASON_CLUSTER_MORPHOLOGY': os.environ['MASON_CLUSTER_MORPHOLOGY'], + 'MASON_DEFINITIONS_REF': os.environ['MASON_DEFINITIONS_REF'], + 'MASON_DISTBUILD_ARCH': os.environ['MASON_DISTBUILD_ARCH'], + 'MASON_TEST_HOST': os.environ['MASON_TEST_HOST'], + 'OPENSTACK_NETWORK_ID': os.environ['OPENSTACK_NETWORK_ID'], + 'TEST_INFRASTRUCTURE_TYPE': os.environ['TEST_INFRASTRUCTURE_TYPE'], + 'TROVE_ID': os.environ['TROVE_ID'], + 'TROVE_HOST': os.environ['TROVE_HOST'], + 'CONTROLLERHOST': os.environ['CONTROLLERHOST'], +} + +yaml.dump(mason_configuration, sys.stdout, default_flow_style=False) +EOF + +if [ "$TEST_INFRASTRUCTURE_TYPE" = "openstack" ]; then + python <<'EOF' >>"$MASON_DATA/mason.conf" +import os, sys, yaml + +openstack_credentials={ + 'OS_USERNAME': os.environ['OPENSTACK_USER'], + 'OS_TENANT_NAME': os.environ['OPENSTACK_TENANT'], + 'OS_TENANT_ID': os.environ['OPENSTACK_TENANT_ID'], + 'OS_AUTH_URL': os.environ['OPENSTACK_AUTH_URL'], + 'OS_PASSWORD': os.environ['OPENSTACK_PASSWORD'], +} + +yaml.dump(openstack_credentials, sys.stdout, default_flow_style=False) +EOF +fi + +########################################################################## +# Enable services +########################################################################## + +ln -s ../mason.timer "$ROOT"/etc/systemd/system/multi-user.target.wants/mason.timer +ln -s ../httpd.service "$ROOT"/etc/systemd/system/multi-user.target.wants/httpd.service diff --git a/extensions/moonshot-kernel.configure b/extensions/moonshot-kernel.configure new file mode 100644 index 00000000..11d01751 --- /dev/null +++ b/extensions/moonshot-kernel.configure @@ -0,0 +1,33 @@ +#!/bin/sh +# +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# This is a "morph deploy" configuration extension to convert a plain +# kernel Image to uImage, for an HP Moonshot m400 cartridge + +set -eu + +case "$MOONSHOT_KERNEL" in + True|yes) + echo "Converting kernel image for Moonshot" + mkimage -A arm -O linux -C none -T kernel -a 0x00080000 \ + -e 0x00080000 -n Linux -d "$1/boot/vmlinux" "$1/boot/uImage" + ;; + *) + echo Unrecognised option "$MOONSHOT_KERNEL" to MOONSHOT_KERNEL + exit 1 + ;; +esac diff --git a/extensions/nfsboot-server.configure b/extensions/nfsboot-server.configure new file mode 100755 index 00000000..9fb48096 --- /dev/null +++ b/extensions/nfsboot-server.configure @@ -0,0 +1,58 @@ +#!/bin/sh +# +# Copyright (C) 2013-2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# +# This is a "morph deploy" configuration extension to set up a server for +# booting over nfs and tftp. +set -e + +ROOT="$1" + +########################################################################## + +nfsboot_root=/srv/nfsboot +tftp_root="$nfsboot_root"/tftp +nfs_root="$nfsboot_root"/nfs +mkdir -p "$ROOT$tftp_root" "$ROOT$nfs_root" + +install -D /dev/stdin "$ROOT/usr/lib/systemd/system/nfsboot-tftp.service" <&2 + exit 1 +} + +check_bool(){ + case "$(eval echo \"\$$1\")" in + True|'') + eval "$1=true" + ;; + False) + eval "$1=false" + ;; + *) + unnaceptable "$1" + ;; + esac +} + +########################################################################## +# Check variables +########################################################################## + +check_bool CEILOMETER_ENABLE_CONTROLLER +check_bool CEILOMETER_ENABLE_COMPUTE + +if ! "$CEILOMETER_ENABLE_CONTROLLER" && \ + ! "$CEILOMETER_ENABLE_COMPUTE"; then + exit 0 +fi + +if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \ + -z "$CEILOMETER_SERVICE_USER" -o \ + -z "$CEILOMETER_SERVICE_PASSWORD" -o \ + -z "$CEILOMETER_DB_USER" -o \ + -z "$CEILOMETER_DB_PASSWORD" -o \ + -z "$METERING_SECRET" -o \ + -z "$RABBITMQ_HOST" -o \ + -z "$RABBITMQ_PORT" -o \ + -z "$RABBITMQ_USER" -o \ + -z "$RABBITMQ_PASSWORD" -o \ + -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ + -z "$CONTROLLER_HOST_ADDRESS" ]; then + echo Some options required for Ceilometer were defined, but not all. 
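+    # Illustrative sketch only (not part of the original extension): the
+    # message above could also name the variables that are actually missing,
+    # along the lines of:
+    #
+    #   for var in KEYSTONE_TEMPORARY_ADMIN_TOKEN CEILOMETER_SERVICE_USER \
+    #              CEILOMETER_SERVICE_PASSWORD CEILOMETER_DB_USER; do
+    #       eval "[ -n \"\$$var\" ]" || echo "  $var is not set" >&2
+    #   done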
+ exit 1 +fi + +###################################### +# Enable relevant openstack services # +###################################### + +if "$CEILOMETER_ENABLE_COMPUTE" || "$CEILOMETER_ENABLE_CONTROLLER"; then + enable openstack-ceilometer-config-setup +fi +if "$CEILOMETER_ENABLE_COMPUTE"; then + enable openstack-ceilometer-compute +fi +if "$CEILOMETER_ENABLE_CONTROLLER"; then + enable openstack-ceilometer-db-setup + enable openstack-ceilometer-api + enable openstack-ceilometer-collector + enable openstack-ceilometer-notification + enable openstack-ceilometer-central + enable openstack-ceilometer-alarm-evaluator + enable openstack-ceilometer-alarm-notifier +fi + +########################################################################## +# Generate configuration file +########################################################################## + +OPENSTACK_DATA="$ROOT/etc/openstack" +mkdir -p "$OPENSTACK_DATA" + +python <<'EOF' >"$OPENSTACK_DATA/ceilometer.conf" +import os, sys, yaml + +ceilometer_configuration={ + 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], + 'CEILOMETER_SERVICE_PASSWORD': os.environ['CEILOMETER_SERVICE_PASSWORD'], + 'CEILOMETER_SERVICE_USER': os.environ['CEILOMETER_SERVICE_USER'], + 'CEILOMETER_DB_USER': os.environ['CEILOMETER_DB_USER'], + 'CEILOMETER_DB_PASSWORD': os.environ['CEILOMETER_DB_PASSWORD'], + 'METERING_SECRET': os.environ['METERING_SECRET'], + 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], + 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], + 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], + 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], + 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], + 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], +} + +yaml.dump(ceilometer_configuration, sys.stdout, default_flow_style=False) +EOF diff --git a/extensions/openstack-cinder.configure b/extensions/openstack-cinder.configure new file mode 100644 index 00000000..4c32e11a --- /dev/null +++ b/extensions/openstack-cinder.configure @@ -0,0 +1,125 @@ +#!/bin/sh + +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + +set -e + +ROOT="$1" + +enable(){ + ln -sf "/usr/lib/systemd/system/$1.service" \ + "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" +} + +unnaceptable(){ + eval echo Unexpected value \$$1 for $1 >&2 + exit 1 +} + +check_bool(){ + case "$(eval echo \"\$$1\")" in + True|'') + eval "$1=true" + ;; + False) + eval "$1=false" + ;; + *) + unnaceptable "$1" + ;; + esac +} + +########################################################################## +# Check variables +########################################################################## + +check_bool CINDER_ENABLE_CONTROLLER +check_bool CINDER_ENABLE_COMPUTE +check_bool CINDER_ENABLE_STORAGE + +if ! "$CINDER_ENABLE_CONTROLLER" && \ + ! "$CINDER_ENABLE_COMPUTE" && \ + ! 
"$CINDER_ENABLE_STORAGE"; then + exit 0 +fi + +if [ -z "$RABBITMQ_HOST" -o \ + -z "$RABBITMQ_PORT" -o \ + -z "$RABBITMQ_USER" -o \ + -z "$RABBITMQ_PASSWORD" -o \ + -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \ + -z "$CINDER_DB_USER" -o \ + -z "$CINDER_DB_PASSWORD" -o \ + -z "$CONTROLLER_HOST_ADDRESS" -o \ + -z "$CINDER_SERVICE_USER" -o \ + -z "$CINDER_SERVICE_PASSWORD" -o \ + -z "$CINDER_DEVICE" -o \ + -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" ]; then + echo Some options required for Cinder were defined, but not all. + exit 1 +fi + +###################################### +# Enable relevant openstack services # +###################################### + +if "$CINDER_ENABLE_COMPUTE" || "$CINDER_ENABLE_STORAGE"; then + enable iscsi-setup + enable target #target.service! + enable iscsid +fi +if "$CINDER_ENABLE_COMPUTE" || "$CINDER_ENABLE_CONTROLLER" || "$CINDER_ENABLE_STORAGE"; then + enable openstack-cinder-config-setup +fi +if "$CINDER_ENABLE_STORAGE"; then + enable openstack-cinder-lv-setup + enable lvm2-lvmetad + enable openstack-cinder-volume + enable openstack-cinder-backup + enable openstack-cinder-scheduler +fi +if "$CINDER_ENABLE_CONTROLLER"; then + enable openstack-cinder-db-setup + enable openstack-cinder-api +fi + +########################################################################## +# Generate configuration file +########################################################################## + +OPENSTACK_DATA="$ROOT/etc/openstack" +mkdir -p "$OPENSTACK_DATA" + +python <<'EOF' >"$OPENSTACK_DATA/cinder.conf" +import os, sys, yaml + +cinder_configuration={ + 'RABBITMQ_HOST':os.environ['RABBITMQ_HOST'], + 'RABBITMQ_PORT':os.environ['RABBITMQ_PORT'], + 'RABBITMQ_USER':os.environ['RABBITMQ_USER'], + 'RABBITMQ_PASSWORD':os.environ['RABBITMQ_PASSWORD'], + 'KEYSTONE_TEMPORARY_ADMIN_TOKEN':os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], + 'CINDER_DB_USER':os.environ['CINDER_DB_USER'], + 'CINDER_DB_PASSWORD':os.environ['CINDER_DB_PASSWORD'], + 'CONTROLLER_HOST_ADDRESS':os.environ['CONTROLLER_HOST_ADDRESS'], + 'CINDER_SERVICE_USER':os.environ['CINDER_SERVICE_USER'], + 'CINDER_SERVICE_PASSWORD':os.environ['CINDER_SERVICE_PASSWORD'], + 'CINDER_DEVICE':os.environ['CINDER_DEVICE'], + 'MANAGEMENT_INTERFACE_IP_ADDRESS':os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], +} + +yaml.dump(cinder_configuration, sys.stdout, default_flow_style=False) +EOF diff --git a/extensions/openstack-glance.configure b/extensions/openstack-glance.configure new file mode 100644 index 00000000..5da08895 --- /dev/null +++ b/extensions/openstack-glance.configure @@ -0,0 +1,101 @@ +#!/bin/sh + +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . 
+ +set -e + +ROOT="$1" + +enable(){ + ln -sf "/usr/lib/systemd/system/$1.service" \ + "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" +} + +unnaceptable(){ + eval echo Unexpected value \$$1 for $1 >&2 + exit 1 +} + +check_bool(){ + case "$(eval echo \"\$$1\")" in + True|'') + eval "$1=true" + ;; + False) + eval "$1=false" + ;; + *) + unnaceptable "$1" + ;; + esac +} + +########################################################################## +# Check variables +########################################################################## + +check_bool GLANCE_ENABLE_SERVICE + +if ! "$GLANCE_ENABLE_SERVICE"; then + exit 0 +fi + +if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \ + -z "$GLANCE_SERVICE_USER" -o \ + -z "$GLANCE_SERVICE_PASSWORD" -o \ + -z "$GLANCE_DB_USER" -o \ + -z "$GLANCE_DB_PASSWORD" -o \ + -z "$RABBITMQ_HOST" -o \ + -z "$RABBITMQ_PORT" -o \ + -z "$RABBITMQ_USER" -o \ + -z "$RABBITMQ_PASSWORD" -o \ + -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ + -z "$CONTROLLER_HOST_ADDRESS" ]; then + echo Some options required for Glance were defined, but not all. + exit 1 +fi + +###################################### +# Enable relevant openstack services # +###################################### + +enable openstack-glance-setup + +########################################################################## +# Generate configuration file +########################################################################## + +OPENSTACK_DATA="$ROOT/etc/openstack" +mkdir -p "$OPENSTACK_DATA" + +python <<'EOF' >"$OPENSTACK_DATA/glance.conf" +import os, sys, yaml + +glance_configuration={ + 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], + 'GLANCE_SERVICE_PASSWORD': os.environ['GLANCE_SERVICE_PASSWORD'], + 'GLANCE_SERVICE_USER': os.environ['GLANCE_SERVICE_USER'], + 'GLANCE_DB_USER': os.environ['GLANCE_DB_USER'], + 'GLANCE_DB_PASSWORD': os.environ['GLANCE_DB_PASSWORD'], + 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], + 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], + 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], + 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], + 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], + 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], +} + +yaml.dump(glance_configuration, sys.stdout, default_flow_style=False) +EOF diff --git a/extensions/openstack-ironic.configure b/extensions/openstack-ironic.configure new file mode 100644 index 00000000..962bbcd1 --- /dev/null +++ b/extensions/openstack-ironic.configure @@ -0,0 +1,155 @@ +#!/bin/sh + +# Copyright (C) 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
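+#
+# This is a "morph deploy" configuration extension for OpenStack Ironic.
+# It enables the openstack-ironic-setup and iSCSI services, writes the
+# IRONIC_* and RABBITMQ_* settings into /etc/openstack/ironic.conf, and
+# sets up a socket-activated tftp-hpa service serving /srv/tftp_root so
+# that bare-metal nodes can be netbooted.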
+ +set -e + +ROOT="$1" + +enable(){ + ln -sf "/usr/lib/systemd/system/$1.service" \ + "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" +} + +unnaceptable(){ + eval echo Unexpected value \$$1 for $1 >&2 + exit 1 +} + +check_bool(){ + case "$(eval echo \"\$$1\")" in + True|'') + eval "$1=true" + ;; + False) + eval "$1=false" + ;; + *) + unnaceptable "$1" + ;; + esac +} + +########################################################################## +# Check variables +########################################################################## + +check_bool IRONIC_ENABLE_SERVICE + +if ! "$IRONIC_ENABLE_SERVICE"; then + exit 0 +fi + +if [ -z "$IRONIC_SERVICE_USER" -o \ + -z "$IRONIC_SERVICE_PASSWORD" -o \ + -z "$IRONIC_DB_USER" -o \ + -z "$IRONIC_DB_PASSWORD" -o \ + -z "$RABBITMQ_HOST" -o \ + -z "$RABBITMQ_USER" -o \ + -z "$RABBITMQ_PASSWORD" -o \ + -z "$RABBITMQ_PORT" -o \ + -z "$CONTROLLER_HOST_ADDRESS" -o \ + -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ + -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then + echo Some options required for Ironic were defined, but not all. + exit 1 +fi + +###################################### +# Enable relevant openstack services # +###################################### + +enable openstack-ironic-setup +enable iscsi-setup +enable target #target.service! +enable iscsid + +########################################################################## +# Generate configuration file +########################################################################## + +OPENSTACK_DATA="$ROOT/etc/openstack" +mkdir -p "$OPENSTACK_DATA" + +python <<'EOF' >"$OPENSTACK_DATA/ironic.conf" +import os, sys, yaml + +ironic_configuration={ + 'IRONIC_SERVICE_USER': os.environ['IRONIC_SERVICE_USER'], + 'IRONIC_SERVICE_PASSWORD': os.environ['IRONIC_SERVICE_PASSWORD'], + 'IRONIC_DB_USER': os.environ['IRONIC_DB_USER'], + 'IRONIC_DB_PASSWORD': os.environ['IRONIC_DB_PASSWORD'], + 'RABBITMQ_HOST':os.environ['RABBITMQ_HOST'], + 'RABBITMQ_PORT':os.environ['RABBITMQ_PORT'], + 'RABBITMQ_USER':os.environ['RABBITMQ_USER'], + 'RABBITMQ_PASSWORD':os.environ['RABBITMQ_PASSWORD'], + 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], + 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], + 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], + +} + +yaml.dump(ironic_configuration, sys.stdout, default_flow_style=False) +EOF + +########################################################################## +# Configure the TFTP service # +########################################################################## + +tftp_root="/srv/tftp_root/" # trailing slash is essential +mkdir -p "$ROOT/$tftp_root" + +install -D /dev/stdin -m 644 "$ROOT/usr/lib/systemd/system/tftp-hpa.service" << 'EOF' +[Unit] +Description=tftp service for booting kernels +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +EnvironmentFile=/etc/tftp-hpa.conf +ExecStart=/usr/sbin/in.tftpd $TFTP_OPTIONS ${TFTP_ROOT} +StandardInput=socket +StandardOutput=inherit +StandardError=journal + +[Install] +WantedBy=multi-user.target +EOF + +install -D /dev/stdin -m 644 "$ROOT/usr/lib/systemd/system/tftp-hpa.socket" << EOF +[Unit] +Description=Tftp server activation socket + +[Socket] +ListenDatagram=$MANAGEMENT_INTERFACE_IP_ADDRESS:69 +FreeBind=yes + +[Install] +WantedBy=sockets.target +EOF + +install -D -m 644 /dev/stdin "$ROOT"/etc/tftp-hpa.conf << EOF +TFTP_ROOT=$tftp_root +TFTP_OPTIONS="-v -v -v -v -v --map-file $tftp_root/map-file" +EOF + 
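+# Note (added for clarity, not in the original script): the map-file
+# installed below uses tftpd-hpa remap rules of the form
+# "r <regex> <replacement>", so requests are resolved inside $tftp_root
+# whether or not the client asks for a /tftpboot/-prefixed path.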
+install -D /dev/stdin -m 644 "$ROOT/$tftp_root"/map-file << EOF +r ^([^/]) $tftp_root\1 +r ^/tftpboot/ $tftp_root\2 +EOF + +cp "$ROOT"/usr/share/syslinux/pxelinux.0 "$ROOT/$tftp_root" diff --git a/extensions/openstack-keystone.configure b/extensions/openstack-keystone.configure new file mode 100644 index 00000000..6b011b14 --- /dev/null +++ b/extensions/openstack-keystone.configure @@ -0,0 +1,123 @@ +#!/bin/sh + +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + +set -e + +ROOT="$1" + +enable(){ + ln -sf "/usr/lib/systemd/system/$1.service" \ + "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" +} + +unnaceptable(){ + eval echo Unexpected value \$$1 for $1 >&2 + exit 1 +} + +check_bool(){ + case "$(eval echo \"\$$1\")" in + True|'') + eval "$1=true" + ;; + False) + eval "$1=false" + ;; + *) + unnaceptable "$1" + ;; + esac +} + +########################################################################## +# Check variables +########################################################################## + +check_bool KEYSTONE_ENABLE_SERVICE + +if ! "$KEYSTONE_ENABLE_SERVICE"; then + exit 0 +fi + +if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \ + -z "$KEYSTONE_ADMIN_PASSWORD" -o \ + -z "$KEYSTONE_DB_USER" -o \ + -z "$KEYSTONE_DB_PASSWORD" -o \ + -z "$RABBITMQ_HOST" -o \ + -z "$RABBITMQ_PORT" -o \ + -z "$RABBITMQ_USER" -o \ + -z "$RABBITMQ_PASSWORD" -o \ + -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ + -z "$CONTROLLER_HOST_ADDRESS" ]; then + echo Some options required for Keystone were defined, but not all. 
+ exit 1 +fi + +python <<'EOF' +import socket +import sys +import os + +try: + socket.inet_pton(socket.AF_INET, os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS']) +except: + print "Error: MANAGEMENT_INTERFACE_IP_ADDRESS is not a valid IP" + sys.exit(1) +EOF + +###################################### +# Enable relevant openstack services # +###################################### + +enable openstack-keystone-setup +enable openstack-horizon-setup +enable postgres-server-setup + +########################################################################## +# Generate configuration file +########################################################################## + +OPENSTACK_DATA="$ROOT/etc/openstack" +mkdir -p "$OPENSTACK_DATA" + +python <<'EOF' >"$OPENSTACK_DATA/keystone.conf" +import os, sys, yaml + +keystone_configuration={ + 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], + 'KEYSTONE_ADMIN_PASSWORD': os.environ['KEYSTONE_ADMIN_PASSWORD'], + 'KEYSTONE_DB_USER': os.environ['KEYSTONE_DB_USER'], + 'KEYSTONE_DB_PASSWORD': os.environ['KEYSTONE_DB_PASSWORD'], + 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], + 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], + 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], + 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], + 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], + 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], +} + +yaml.dump(keystone_configuration, sys.stdout, default_flow_style=False) +EOF + +python << 'EOF' > "$OPENSTACK_DATA/postgres.conf" +import os, sys, yaml + +postgres_configuration={ + 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], +} + +yaml.dump(postgres_configuration, sys.stdout, default_flow_style=False) +EOF diff --git a/extensions/openstack-network.configure b/extensions/openstack-network.configure new file mode 100644 index 00000000..10be5a1c --- /dev/null +++ b/extensions/openstack-network.configure @@ -0,0 +1,50 @@ +#!/bin/sh + +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . 
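+#
+# This is a "morph deploy" configuration extension for the OpenStack
+# network node. It enables the openvswitch-setup and
+# openstack-network-setup services and records the optional
+# EXTERNAL_INTERFACE setting in /etc/openstack/network.conf.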
+ +set -e + +ROOT="$1" + +enable(){ + ln -sf "/usr/lib/systemd/system/$1.service" \ + "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" +} + +################### +# Enable services # +################### + +enable openvswitch-setup +enable openstack-network-setup + +########################################################################## +# Generate config variable shell snippet +########################################################################## + +OPENSTACK_DATA="$ROOT/etc/openstack" +mkdir -p "$OPENSTACK_DATA" + +python <<'EOF' >"$OPENSTACK_DATA/network.conf" +import os, sys, yaml + +network_configuration = {} + +optional_keys = ('EXTERNAL_INTERFACE',) + +network_configuration.update((k, os.environ[k]) for k in optional_keys if k in os.environ) + +yaml.dump(network_configuration, sys.stdout, default_flow_style=False) +EOF diff --git a/extensions/openstack-neutron.configure b/extensions/openstack-neutron.configure new file mode 100644 index 00000000..210222db --- /dev/null +++ b/extensions/openstack-neutron.configure @@ -0,0 +1,138 @@ +#!/bin/sh + +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + +set -e + +ROOT="$1" + +enable(){ + ln -sf "/usr/lib/systemd/system/openstack-neutron-$1.service" \ + "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-neutron-$1.service" +} + +unnaceptable(){ + eval echo Unexpected value \$$1 for $1 >&2 + exit 1 +} + +check_bool(){ + case "$(eval echo \"\$$1\")" in + True|'') + eval "$1=true" + ;; + False) + eval "$1=false" + ;; + *) + unnaceptable "$1" + ;; + esac +} + +########################################################################## +# Check variables +########################################################################## + +check_bool NEUTRON_ENABLE_CONTROLLER +check_bool NEUTRON_ENABLE_MANAGER +check_bool NEUTRON_ENABLE_AGENT + +if ! "$NEUTRON_ENABLE_CONTROLLER" && \ + ! "$NEUTRON_ENABLE_MANAGER" && \ + ! "$NEUTRON_ENABLE_AGENT"; then + exit 0 +fi + +if [ -z "$NEUTRON_SERVICE_USER" -o \ + -z "$NEUTRON_SERVICE_PASSWORD" -o \ + -z "$NEUTRON_DB_USER" -o \ + -z "$NEUTRON_DB_PASSWORD" -o \ + -z "$METADATA_PROXY_SHARED_SECRET" -o \ + -z "$NOVA_SERVICE_USER" -o \ + -z "$NOVA_SERVICE_PASSWORD" -o \ + -z "$RABBITMQ_HOST" -o \ + -z "$RABBITMQ_USER" -o \ + -z "$RABBITMQ_PASSWORD" -o \ + -z "$RABBITMQ_PORT" -o \ + -z "$CONTROLLER_HOST_ADDRESS" -o \ + -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ + -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then + echo Some options required for Neutron were defined, but not all. + exit 1 +fi + +############################################# +# Ensure /var/run is an appropriate symlink # +############################################# + +if ! 
link="$(readlink "$ROOT/var/run")" || [ "$link" != ../run ]; then + rm -rf "$ROOT/var/run" + ln -s ../run "$ROOT/var/run" +fi + +################### +# Enable services # +################### + +if "$NEUTRON_ENABLE_CONTROLLER"; then + enable config-setup + enable db-setup + enable server +fi + +if "$NEUTRON_ENABLE_MANAGER"; then + enable config-setup + enable ovs-cleanup + enable dhcp-agent + enable l3-agent + enable plugin-openvswitch-agent + enable metadata-agent +fi + +if "$NEUTRON_ENABLE_AGENT"; then + enable config-setup + enable plugin-openvswitch-agent +fi + +########################################################################## +# Generate config variable shell snippet +########################################################################## + +OPENSTACK_DATA="$ROOT/etc/openstack" +mkdir -p "$OPENSTACK_DATA" + +python <<'EOF' >"$OPENSTACK_DATA/neutron.conf" +import os, sys, yaml + +nova_configuration={ + 'NEUTRON_SERVICE_USER': os.environ['NEUTRON_SERVICE_USER'], + 'NEUTRON_SERVICE_PASSWORD': os.environ['NEUTRON_SERVICE_PASSWORD'], + 'NEUTRON_DB_USER': os.environ['NEUTRON_DB_USER'], + 'NEUTRON_DB_PASSWORD': os.environ['NEUTRON_DB_PASSWORD'], + 'METADATA_PROXY_SHARED_SECRET': os.environ['METADATA_PROXY_SHARED_SECRET'], + 'NOVA_SERVICE_USER': os.environ['NOVA_SERVICE_USER'], + 'NOVA_SERVICE_PASSWORD': os.environ['NOVA_SERVICE_PASSWORD'], + 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], + 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], + 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], + 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], + 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], + 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], + 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], +} + +yaml.dump(nova_configuration, sys.stdout, default_flow_style=False) +EOF diff --git a/extensions/openstack-nova.configure b/extensions/openstack-nova.configure new file mode 100644 index 00000000..213f1852 --- /dev/null +++ b/extensions/openstack-nova.configure @@ -0,0 +1,168 @@ +#!/bin/sh + +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + +set -e + +ROOT="$1" + +enable(){ + ln -sf "/usr/lib/systemd/system/openstack-nova-$1.service" \ + "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-nova-$1.service" +} + +unnaceptable(){ + eval echo Unexpected value \$$1 for $1 >&2 + exit 1 +} + +check_bool(){ + case "$(eval echo \"\$$1\")" in + True|'') + eval "$1=true" + ;; + False) + eval "$1=false" + ;; + *) + unnaceptable "$1" + ;; + esac +} + +########################################################################## +# Check variables +########################################################################## + +check_bool NOVA_ENABLE_CONTROLLER +check_bool NOVA_ENABLE_COMPUTE + +if ! "$NOVA_ENABLE_CONTROLLER" && \ + ! 
"$NOVA_ENABLE_COMPUTE"; then + exit 0 +fi + +if [ -z "$NOVA_SERVICE_USER" -o \ + -z "$NOVA_SERVICE_PASSWORD" -o \ + -z "$NOVA_DB_USER" -o \ + -z "$NOVA_DB_PASSWORD" -o \ + -z "$NOVA_VIRT_TYPE" -o \ + -z "$NEUTRON_SERVICE_USER" -o \ + -z "$NEUTRON_SERVICE_PASSWORD" -o \ + -z "$IRONIC_SERVICE_USER" -a \ + -z "$IRONIC_SERVICE_PASSWORD" -a \ + -z "$METADATA_PROXY_SHARED_SECRET" -o \ + -z "$RABBITMQ_HOST" -o \ + -z "$RABBITMQ_USER" -o \ + -z "$RABBITMQ_PASSWORD" -o \ + -z "$RABBITMQ_PORT" -o \ + -z "$CONTROLLER_HOST_ADDRESS" -o \ + -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ + -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then + echo Some options required for Nova were defined, but not all. + exit 1 +fi + +############################################### +# Enable libvirtd and libvirt-guests services # +############################################### + +wants_dir="$ROOT"/usr/lib/systemd/system/multi-user.target.wants +mkdir -p "$wants_dir" +mkdir -p "$ROOT"/var/lock/subsys +ln -sf ../libvirtd.service "$wants_dir/libvirtd.service" + +###################################### +# Enable relevant openstack services # +###################################### + +if "$NOVA_ENABLE_CONTROLLER" || "$NOVA_ENABLE_COMPUTE"; then + enable config-setup +fi +if "$NOVA_ENABLE_CONTROLLER" && ! "$NOVA_ENABLE_COMPUTE"; then + enable conductor +fi +if "$NOVA_ENABLE_COMPUTE"; then + enable compute +fi +if "$NOVA_ENABLE_CONTROLLER"; then + for service in db-setup api cert consoleauth novncproxy scheduler serialproxy; do + enable "$service" + done +fi + +########################################################################## +# Change iprange for the interal libvirt to avoid clashes +# with eth0 ip range +########################################################################## + +sed -i "s/192\.168\.122\./192\.168\.1\./g" \ + "$ROOT"/etc/libvirt/qemu/networks/default.xml + + +########################################################################## +# Generate configuration file +########################################################################## + +case "$NOVA_BAREMETAL_SCHEDULING" in + True|true|yes) + export COMPUTE_MANAGER=ironic.nova.compute.manager.ClusteredComputeManager + export RESERVED_HOST_MEMORY_MB=0 + export SCHEDULER_HOST_MANAGER=nova.scheduler.ironic_host_manager.IronicHostManager + export RAM_ALLOCATION_RATIO=1.0 + export COMPUTE_DRIVER=nova.virt.ironic.IronicDriver + ;; + *) + export COMPUTE_MANAGER=nova.compute.manager.ComputeManager + export RESERVED_HOST_MEMORY_MB=512 + export SCHEDULER_HOST_MANAGER=nova.scheduler.host_manager.HostManager + export RAM_ALLOCATION_RATIO=1.5 + export COMPUTE_DRIVER=libvirt.LibvirtDriver + ;; +esac + +OPENSTACK_DATA="$ROOT/etc/openstack" +mkdir -p "$OPENSTACK_DATA" + +python <<'EOF' >"$OPENSTACK_DATA/nova.conf" +import os, sys, yaml + +nova_configuration={ + 'NOVA_SERVICE_USER': os.environ['NOVA_SERVICE_USER'], + 'NOVA_SERVICE_PASSWORD': os.environ['NOVA_SERVICE_PASSWORD'], + 'NOVA_DB_USER': os.environ['NOVA_DB_USER'], + 'NOVA_DB_PASSWORD': os.environ['NOVA_DB_PASSWORD'], + 'NOVA_VIRT_TYPE': os.environ['NOVA_VIRT_TYPE'], + 'COMPUTE_MANAGER': os.environ['COMPUTE_MANAGER'], + 'RESERVED_HOST_MEMORY_MB': os.environ['RESERVED_HOST_MEMORY_MB'], + 'SCHEDULER_HOST_MANAGER': os.environ['SCHEDULER_HOST_MANAGER'], + 'RAM_ALLOCATION_RATIO': os.environ['RAM_ALLOCATION_RATIO'], + 'COMPUTE_DRIVER': os.environ['COMPUTE_DRIVER'], + 'NEUTRON_SERVICE_USER': os.environ['NEUTRON_SERVICE_USER'], + 'NEUTRON_SERVICE_PASSWORD': os.environ['NEUTRON_SERVICE_PASSWORD'], + 'IRONIC_SERVICE_USER': 
os.environ['IRONIC_SERVICE_USER'], + 'IRONIC_SERVICE_PASSWORD': os.environ['IRONIC_SERVICE_PASSWORD'], + 'METADATA_PROXY_SHARED_SECRET': os.environ['METADATA_PROXY_SHARED_SECRET'], + 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], + 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], + 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], + 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], + 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], + 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], + 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], +} + +yaml.dump(nova_configuration, sys.stdout, default_flow_style=False) +EOF diff --git a/extensions/openstack-swift-controller.configure b/extensions/openstack-swift-controller.configure new file mode 100644 index 00000000..424ab57b --- /dev/null +++ b/extensions/openstack-swift-controller.configure @@ -0,0 +1,49 @@ +#!/bin/bash +# +# Copyright © 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + + +set -e + +export ROOT="$1" + +MANDATORY_OPTIONS="SWIFT_ADMIN_PASSWORD KEYSTONE_TEMPORARY_ADMIN_TOKEN" + +for option in $MANDATORY_OPTIONS +do + if ! [[ -v $option ]] + then + missing_option=True + echo "Required option $option isn't set!" 
>&2 + fi +done + +if [[ $missing_option = True ]]; then exit 1; fi + +mkdir -p "$ROOT/usr/lib/systemd/system/multi-user.target.wants" # ensure this exists before we make symlinks + +ln -s "/usr/lib/systemd/system/swift-controller-setup.service" \ + "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-controller-setup.service" +ln -s "/usr/lib/systemd/system/memcached.service" \ + "$ROOT/usr/lib/systemd/system/multi-user.target.wants/memcached.service" +ln -s "/usr/lib/systemd/system/openstack-swift-proxy.service" \ + "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-proxy.service" + +cat << EOF > "$ROOT"/usr/share/openstack/swift-controller-vars.yml +--- +SWIFT_ADMIN_PASSWORD: $SWIFT_ADMIN_PASSWORD +MANAGEMENT_INTERFACE_IP_ADDRESS: $MANAGEMENT_INTERFACE_IP_ADDRESS +KEYSTONE_TEMPORARY_ADMIN_TOKEN: $KEYSTONE_TEMPORARY_ADMIN_TOKEN +EOF diff --git a/extensions/pxeboot.check b/extensions/pxeboot.check new file mode 100755 index 00000000..611708a9 --- /dev/null +++ b/extensions/pxeboot.check @@ -0,0 +1,86 @@ +#!/usr/bin/python + +import itertools +import os +import subprocess +import sys +flatten = itertools.chain.from_iterable + +def powerset(iterable): + "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)" + s = list(iterable) + return flatten(itertools.combinations(s, r) for r in range(len(s)+1)) + +valid_option_sets = frozenset(( + ('spawn-novlan', frozenset(('PXEBOOT_DEPLOYER_INTERFACE',))), + ('spawn-vlan', frozenset(('PXEBOOT_DEPLOYER_INTERFACE', 'PXEBOOT_VLAN'))), + ('existing-dhcp', frozenset(('PXEBOOT_DEPLOYER_INTERFACE', + 'PXEBOOT_CONFIG_TFTP_ADDRESS'))), + ('existing-server', frozenset(('PXEBOOT_CONFIG_TFTP_ADDRESS', + 'PXEBOOT_ROOTFS_RSYNC_ADDRESS'))), +)) +valid_modes = frozenset(mode for mode, opt_set in valid_option_sets) + + +def compute_matches(env): + complete_matches = set() + for mode, opt_set in valid_option_sets: + if all(k in env for k in opt_set): + complete_matches.add(opt_set) + return complete_matches + +complete_matches = compute_matches(os.environ) + +def word_separate_options(options): + assert options + s = options.pop(-1) + if options: + s = '%s and %s' % (', '.join(options), s) + return s + + +valid_options = frozenset(flatten(opt_set for (mode, opt_set) + in valid_option_sets)) +matched_options = frozenset(o for o in valid_options + if o in os.environ) +if not complete_matches: + addable_sets = frozenset(frozenset(os) - matched_options for os in + valid_options + if frozenset(os) - matched_options) + print('Please provide %s' % ' or '.join( + word_separate_options(list(opt_set)) + for opt_set in addable_sets if opt_set)) + sys.exit(1) +elif len(complete_matches) > 1: + removable_sets = frozenset(matched_options - frozenset(os) for os in + powerset(matched_options) + if len(compute_matches(os)) == 1) + print('WARNING: Following options might not be needed: %s' % ' or '.join( + word_separate_options(list(opt_set)) + for opt_set in removable_sets if opt_set)) + +if 'PXEBOOT_MODE' in os.environ: + mode = os.environ['PXEBOOT_MODE'] +else: + try: + mode, = (mode for (mode, opt_set) in valid_option_sets + if all(o in os.environ for o in opt_set)) + + except ValueError as e: + print ('More than one candidate for PXEBOOT_MODE, please ' + 'set a value for it. 
Type `morph help pxeboot.write for ' + 'more info') + sys.exit(1) + +if mode not in valid_modes: + print('%s is not a valid PXEBOOT_MODE' % mode) + sys.exit(1) + +if mode != 'existing-server': + with open(os.devnull, 'w') as devnull: + if subprocess.call(['systemctl', 'is-active', 'nfs-server'], + stdout=devnull) != 0: + print ('ERROR: nfs-server.service is not running and is needed ' + 'for this deployment. Please, run `systemctl start nfs-server` ' + 'and try `morph deploy` again.') + sys.exit(1) diff --git a/extensions/pxeboot.write b/extensions/pxeboot.write new file mode 100644 index 00000000..3a12ebcc --- /dev/null +++ b/extensions/pxeboot.write @@ -0,0 +1,755 @@ +#!/usr/bin/env python + + +import collections +import contextlib +import errno +import itertools +import logging +import os +import select +import signal +import shutil +import socket +import string +import StringIO +import subprocess +import sys +import tempfile +import textwrap +import urlparse + +import cliapp + +import morphlib + + +def _int_to_quad_dot(i): + return '.'.join(( + str(i >> 24 & 0xff), + str(i >> 16 & 0xff), + str(i >> 8 & 0xff), + str(i & 0xff))) + + +def _quad_dot_to_int(s): + i = 0 + for octet in s.split('.'): + i <<= 8 + i += int(octet, 10) + return i + + +def _netmask_to_prefixlen(mask): + bs = '{:032b}'.format(mask) + prefix = bs.rstrip('0') + if '0' in prefix: + raise ValueError('abnormal netmask: %s' % + _int_to_quad_dot(mask)) + return len(prefix) + + +def _get_routes(): + routes = [] + with open('/proc/net/route', 'r') as f: + for line in list(f)[1:]: + fields = line.split() + destination, flags, mask = fields[1], fields[3], fields[7] + flags = int(flags, 16) + if flags & 2: + # default route, ignore + continue + destination = socket.ntohl(int(destination, 16)) + mask = socket.ntohl(int(mask, 16)) + prefixlen = _netmask_to_prefixlen(mask) + routes.append((destination, prefixlen)) + return routes + + +class IPRange(object): + def __init__(self, prefix, prefixlen): + self.prefixlen = prefixlen + mask = (1 << prefixlen) - 1 + self.mask = mask << (32 - prefixlen) + self.prefix = prefix & self.mask + @property + def bitstring(self): + return ('{:08b}' * 4).format( + self.prefix >> 24 & 0xff, + self.prefix >> 16 & 0xff, + self.prefix >> 8 & 0xff, + self.prefix & 0xff + )[:self.prefixlen] + def startswith(self, other_range): + return self.bitstring.startswith(other_range.bitstring) + + +def find_subnet(valid_ranges, invalid_ranges): + for vr in valid_ranges: + known_subnets = set(ir for ir in invalid_ranges if ir.startswith(vr)) + prefixlens = set(r.prefixlen for r in known_subnets) + prefixlens.add(32 - 2) # need at least 4 addresses in subnet + prefixlen = min(prefixlens) + if prefixlen <= vr.prefixlen: + # valid subnet is full, move on to next + continue + subnetlen = prefixlen - vr.prefixlen + for prefix in (subnetid + vr.prefix + for subnetid in xrange(1 << subnetlen)): + if any(subnet.prefix == prefix for subnet in known_subnets): + continue + return prefix, prefixlen + + +def _normalise_macaddr(macaddr): + '''pxelinux.0 wants the mac address to be lowercase and - separated''' + digits = (c for c in macaddr.lower() if c in string.hexdigits) + nibble_pairs = grouper(digits, 2) + return '-'.join(''.join(byte) for byte in nibble_pairs) + + +@contextlib.contextmanager +def executor(target_pid): + 'Kills a process if its parent dies' + read_fd, write_fd = os.pipe() + helper_pid = os.fork() + if helper_pid == 0: + try: + os.close(write_fd) + while True: + rlist, _, _ = select.select([read_fd], [], []) + 
if read_fd in rlist: + d = os.read(read_fd, 1) + if not d: + os.kill(target_pid, signal.SIGKILL) + if d in ('', 'Q'): + os._exit(0) + else: + os._exit(1) + except BaseException as e: + import traceback + traceback.print_exc() + os._exit(1) + os.close(read_fd) + yield + os.write(write_fd, 'Q') + os.close(write_fd) + + +def grouper(iterable, n, fillvalue=None): + "Collect data into fixed-length chunks or blocks" + # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx" + args = [iter(iterable)] * n + return itertools.izip_longest(*args, fillvalue=fillvalue) + + +class PXEBoot(morphlib.writeexts.WriteExtension): + @contextlib.contextmanager + def _vlan(self, interface, vlan): + viface = '%s.%s' % (interface, vlan) + self.status(msg='Creating vlan %(viface)s', viface=viface) + subprocess.check_call(['vconfig', 'add', interface, str(vlan)]) + try: + yield viface + finally: + self.status(msg='Destroying vlan %(viface)s', viface=viface) + subprocess.call(['vconfig', 'rem', viface]) + + @contextlib.contextmanager + def _static_ip(self, iface): + valid_ranges = set(( + IPRange(_quad_dot_to_int('192.168.0.0'), 16), + IPRange(_quad_dot_to_int('172.16.0.0'), 12), + IPRange(_quad_dot_to_int('10.0.0.0'), 8), + )) + invalid_ranges = set(IPRange(prefix, prefixlen) + for (prefix, prefixlen) in _get_routes()) + prefix, prefixlen = find_subnet(valid_ranges, invalid_ranges) + netaddr = prefix + dhcp_server_ip = netaddr + 1 + client_ip = netaddr + 2 + broadcast_ip = prefix | ((1 << (32 - prefixlen)) - 1) + self.status(msg='Assigning ip address %(ip)s/%(prefixlen)d to ' + 'iface %(iface)s', + ip=_int_to_quad_dot(dhcp_server_ip), prefixlen=prefixlen, + iface=iface) + subprocess.check_call(['ip', 'addr', 'add', + '{}/{}'.format(_int_to_quad_dot(dhcp_server_ip), + prefixlen), + 'broadcast', _int_to_quad_dot(broadcast_ip), + 'scope', 'global', + 'dev', iface]) + try: + yield (dhcp_server_ip, client_ip, broadcast_ip) + finally: + self.status(msg='Removing ip addresses from iface %(iface)s', + iface=iface) + subprocess.call(['ip', 'addr', 'flush', 'dev', iface]) + + @contextlib.contextmanager + def _up_interface(self, iface): + self.status(msg='Bringing interface %(iface)s up', iface=iface) + subprocess.check_call(['ip', 'link', 'set', iface, 'up']) + try: + yield + finally: + self.status(msg='Bringing interface %(iface)s down', iface=iface) + subprocess.call(['ip', 'link', 'set', iface, 'down']) + + @contextlib.contextmanager + def static_ip(self, interface): + with self._static_ip(iface=interface) as (host_ip, client_ip, + broadcast_ip), \ + self._up_interface(iface=interface): + yield (_int_to_quad_dot(host_ip), + _int_to_quad_dot(client_ip), + _int_to_quad_dot(broadcast_ip)) + + @contextlib.contextmanager + def vlan(self, interface, vlan): + with self._vlan(interface=interface, vlan=vlan) as viface, \ + self.static_ip(interface=viface) \ + as (host_ip, client_ip, broadcast_ip): + yield host_ip, client_ip, broadcast_ip + + @contextlib.contextmanager + def _tempdir(self): + td = tempfile.mkdtemp() + print 'Created tempdir:', td + try: + yield td + finally: + shutil.rmtree(td, ignore_errors=True) + + @contextlib.contextmanager + def _remote_tempdir(self, hostname, template): + persist = os.environ.get('PXE_INSTALLER') in ('no', 'False') + td = cliapp.ssh_runcmd(hostname, ['mktemp', '-d', template]).strip() + try: + yield td + finally: + if not persist: + cliapp.ssh_runcmd(hostname, ['find', td, '-delete']) + + def _serve_tftpd(self, sock, host, port, interface, tftproot): + self.settings.progname = 'tftp server' + 
self._set_process_name() + while True: + logging.debug('tftpd waiting for connections') + # recvfrom with MSG_PEEK is how you accept UDP connections + _, peer = sock.recvfrom(0, socket.MSG_PEEK) + conn = sock + logging.debug('Connecting socket to peer: ' + repr(peer)) + conn.connect(peer) + # The existing socket is now only serving that peer, so we need to + # bind a new UDP socket to the wildcard address, which needs the + # port to be in REUSEADDR mode. + conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + logging.debug('Binding replacement socket to ' + repr((host, port))) + sock.bind((host, port)) + + logging.debug('tftpd server handing connection to tftpd') + tftpd_serve = ['tftpd', '-rl', tftproot] + ret = subprocess.call(args=tftpd_serve, stdin=conn, + stdout=conn, stderr=None, close_fds=True) + # It's handy to turn off REUSEADDR after the rebinding, + # so we can protect against future bind attempts on this port. + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 0) + logging.debug('tftpd exited %d' % ret) + os._exit(0) + + @contextlib.contextmanager + def _spawned_tftp_server(self, tftproot, host_ip, interface, tftp_port=0): + # inetd-style launchers tend to bind UDP ports with SO_REUSEADDR, + # because they need to have multiple ports bound, one for recieving + # all connection attempts on that port, and one for each concurrent + # connection with a peer + # this makes detecting whether there's a tftpd running difficult, so + # we'll instead use an ephemeral port and configure the PXE boot to + # use that tftp server for the kernel + s = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) + s.bind((host_ip, tftp_port)) + host, port = s.getsockname() + self.status(msg='Bound listen socket to %(host)s, %(port)s', + host=host, port=port) + pid = os.fork() + if pid == 0: + try: + self._serve_tftpd(sock=s, host=host, port=port, + interface=interface, tftproot=tftproot) + except BaseException as e: + import traceback + traceback.print_exc() + os._exit(1) + s.close() + with executor(pid): + try: + yield port + finally: + self.status(msg='Killing tftpd listener pid=%(pid)d', + pid=pid) + os.kill(pid, signal.SIGKILL) + + @contextlib.contextmanager + def tftp_server(self, host_ip, interface, tftp_port=0): + with self._tempdir() as tftproot, \ + self._spawned_tftp_server(tftproot=tftproot, host_ip=host_ip, + interface=interface, + tftp_port=tftp_port) as tftp_port: + self.status(msg='Serving tftp root %(tftproot)s, on port %(port)d', + port=tftp_port, tftproot=tftproot) + yield tftp_port, tftproot + + @contextlib.contextmanager + def _local_copy(self, src, dst): + self.status(msg='Installing %(src)s to %(dst)s', + src=src, dst=dst) + shutil.copy2(src=src, dst=dst) + try: + yield + finally: + self.status(msg='Removing %(dst)s', dst=dst) + os.unlink(dst) + + @contextlib.contextmanager + def _local_symlink(self, src, dst): + os.symlink(src, dst) + try: + yield + finally: + os.unlink(dst) + + def local_pxelinux(self, tftproot): + return self._local_copy('/usr/share/syslinux/pxelinux.0', + os.path.join(tftproot, 'pxelinux.0')) + + def local_kernel(self, rootfs, tftproot): + return self._local_copy(os.path.join(rootfs, 'boot/vmlinuz'), + os.path.join(tftproot, 'kernel')) + + @contextlib.contextmanager + def _remote_copy(self, hostname, src, dst): + persist = os.environ.get('PXE_INSTALLER') in ('no', 'False') + with open(src, 'r') as f: + 
cliapp.ssh_runcmd(hostname, + ['install', '-D', '-m644', '/proc/self/fd/0', + dst], stdin=f, stdout=None, stderr=None) + try: + yield + finally: + if not persist: + cliapp.ssh_runcmd(hostname, ['rm', dst]) + + @contextlib.contextmanager + def _remote_symlink(self, hostname, src, dst): + persist = os.environ.get('PXE_INSTALLER') in ('no', 'False') + cliapp.ssh_runcmd(hostname, + ['ln', '-s', '-f', src, dst], + stdin=None, stdout=None, stderr=None) + try: + yield + finally: + if not persist: + cliapp.ssh_runcmd(hostname, ['rm', '-f', dst]) + + @contextlib.contextmanager + def remote_kernel(self, rootfs, tftp_url, macaddr): + for name in ('vmlinuz', 'zImage', 'uImage'): + kernel_path = os.path.join(rootfs, 'boot', name) + if os.path.exists(kernel_path): + break + else: + raise cliapp.AppException('Failed to locate kernel') + url = urlparse.urlsplit(tftp_url) + basename = '{}-kernel'.format(_normalise_macaddr(macaddr)) + target_path = os.path.join(url.path, basename) + with self._remote_copy(hostname=url.hostname, src=kernel_path, + dst=target_path): + yield basename + + @contextlib.contextmanager + def remote_fdt(self, rootfs, tftp_url, macaddr): + fdt_rel_path = os.environ.get('DTB_PATH', '') + if fdt_rel_path == '': + yield + fdt_abs_path = os.path.join(rootfs, fdt_rel_path) + if not fdt_abs_path: + raise cliapp.AppException('Failed to locate Flattened Device Tree') + url = urlparse.urlsplit(tftp_url) + basename = '{}-fdt'.format(_normalise_macaddr(macaddr)) + target_path = os.path.join(url.path, basename) + with self._remote_copy(hostname=url.hostname, src=fdt_abs_path, + dst=target_path): + yield basename + + @contextlib.contextmanager + def local_nfsroot(self, rootfs, target_ip): + nfsroot = target_ip + ':' + rootfs + self.status(msg='Exporting %(nfsroot)s as local nfsroot', + nfsroot=nfsroot) + cliapp.runcmd(['exportfs', '-o', 'ro,insecure,no_root_squash', + nfsroot]) + try: + yield + finally: + self.status(msg='Removing %(nfsroot)s from local nfsroots', + nfsroot=nfsroot) + cliapp.runcmd(['exportfs', '-u', nfsroot]) + + @contextlib.contextmanager + def remote_nfsroot(self, rootfs, rsync_url, macaddr): + url = urlparse.urlsplit(rsync_url) + template = os.path.join(url.path, + _normalise_macaddr(macaddr) + '.XXXXXXXXXX') + with self._remote_tempdir(hostname=url.hostname, template=template) \ + as tempdir: + nfsroot = urlparse.urlunsplit((url.scheme, url.netloc, tempdir, + url.query, url.fragment)) + cliapp.runcmd(['rsync', '-asSPH', '--delete', rootfs, nfsroot], + stdin=None, stdout=open(os.devnull, 'w'), + stderr=None) + yield os.path.join(os.path.basename(tempdir), + os.path.basename(rootfs)) + + @staticmethod + def _write_pxe_config(fh, kernel_tftp_url, rootfs_nfs_url, device=None, + fdt_subpath=None, extra_args=''): + + if device is None: + ip_cfg = "ip=dhcp" + else: + ip_cfg = "ip=:::::{device}:dhcp::".format(device=device) + + fh.write(textwrap.dedent('''\ + DEFAULT default + LABEL default + LINUX {kernel_url} + APPEND root=/dev/nfs {ip_cfg} nfsroot={rootfs_nfs_url} {extra_args} + ''').format(kernel_url=kernel_tftp_url, ip_cfg=ip_cfg, + rootfs_nfs_url=rootfs_nfs_url, extra_args=extra_args)) + if fdt_subpath is not None: + fh.write("FDT {}\n".format(fdt_subpath)) + fh.flush() + + @contextlib.contextmanager + def local_pxeboot_config(self, tftproot, macaddr, ip, tftp_port, + nfsroot_dir, device=None): + kernel_tftp_url = 'tftp://{}:{}/kernel'.format(ip, tftp_port) + rootfs_nfs_url = '{}:{}'.format(ip, nfsroot_dir) + pxe_cfg_filename = _normalise_macaddr(macaddr) + pxe_cfg_path = 
os.path.join(tftproot, 'pxelinux.cfg', pxe_cfg_filename) + os.makedirs(os.path.dirname(pxe_cfg_path)) + with open(pxe_cfg_path, 'w') as f: + self._write_pxe_config(fh=f, kernel_tftp_url=kernel_tftp_url, + rootfs_nfs_url=rootfs_nfs_url, + device=device, + extra_args=os.environ.get('KERNEL_ARGS','')) + + try: + with self._local_symlink( + src=pxe_cfg_filename, + dst=os.path.join(tftproot, + 'pxelinux.cfg', + '01-' + pxe_cfg_filename)): + yield + finally: + os.unlink(pxe_cfg_path) + + @contextlib.contextmanager + def remote_pxeboot_config(self, tftproot, kernel_tftproot, kernel_subpath, + fdt_subpath, rootfs_nfsroot, rootfs_subpath, + macaddr): + rootfs_nfs_url = '{}/{}'.format(rootfs_nfsroot, rootfs_subpath) + url = urlparse.urlsplit(kernel_tftproot) + kernel_tftp_url = '{}:{}'.format(url.netloc, kernel_subpath) + pxe_cfg_filename = _normalise_macaddr(macaddr) + url = urlparse.urlsplit(tftproot) + inst_cfg_path = os.path.join(url.path, 'pxelinux.cfg') + with tempfile.NamedTemporaryFile() as f: + self._write_pxe_config( + fh=f, kernel_tftp_url=kernel_tftp_url, + fdt_subpath=fdt_subpath, + rootfs_nfs_url=rootfs_nfs_url, + extra_args=os.environ.get('KERNEL_ARGS','')) + with self._remote_copy( + hostname=url.hostname, src=f.name, + dst=os.path.join(inst_cfg_path, + pxe_cfg_filename)), \ + self._remote_symlink( + hostname=url.hostname, + src=pxe_cfg_filename, + dst=os.path.join(inst_cfg_path, + '01-' + pxe_cfg_filename)): + yield + + @contextlib.contextmanager + def dhcp_server(self, interface, host_ip, target_ip, broadcast_ip): + with self._tempdir() as td: + leases_path = os.path.join(td, 'leases') + config_path = os.path.join(td, 'config') + stdout_path = os.path.join(td, 'stdout') + stderr_path = os.path.join(td, 'stderr') + pidfile_path = os.path.join(td, 'pid') + with open(config_path, 'w') as f: + f.write(textwrap.dedent('''\ + start {target_ip} + end {target_ip} + interface {interface} + max_leases 1 + lease_file {leases_path} + pidfile {pidfile_path} + boot_file pxelinux.0 + option dns {host_ip} + option broadcast {broadcast_ip} + ''').format(**locals())) + with open(stdout_path, 'w') as stdout, \ + open(stderr_path, 'w') as stderr: + sp = subprocess.Popen(['udhcpd', '-f', config_path], cwd=td, + stdin=open(os.devnull), stdout=stdout, + stderr=stderr) + try: + with executor(sp.pid): + yield + finally: + sp.terminate() + + def get_interface_ip(self, interface): + ip_addresses = [] + info = cliapp.runcmd(['ip', '-o', '-f', 'inet', + 'addr', 'show', interface]).rstrip('\n') + if info: + tokens = collections.deque(info.split()[1:]) + ifname = tokens.popleft() + while tokens: + tok = tokens.popleft() + if tok == 'inet': + address = tokens.popleft() + address, netmask = address.split('/') + ip_addresses.append(address) + elif tok == 'brd': + tokens.popleft() # not interested in broadcast address + elif tok == 'scope': + tokens.popleft() # not interested in scope tag + else: + continue + if not ip_addresses: + raise cliapp.AppException('Interface %s has no addresses' + % interface) + if len(ip_addresses) > 1: + warnings.warn('Interface %s has multiple addresses, ' + 'using first (%s)' % (interface, ip_addresses[0])) + return ip_addresses[0] + + def ipmi_set_target_vlan(self): + if any(env_var.startswith('IPMI_') for env_var in os.environ): + # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST and PXEBOOT_VLAN + default = textwrap.dedent('''\ + ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\ + lan set 1 vlan id "$PXEBOOT_VLAN" + ''') + else: + default = textwrap.dedent('''\ + while true; do + 
echo Please set the target\\'s vlan to $PXEBOOT_VLAN, \\ + then enter \\"vlanned\\" + read + if [ "$REPLY" = vlanned ]; then + break + fi + done + ''') + command = os.environ.get('PXEBOOT_SET_VLAN_COMMAND', default) + subprocess.check_call(['sh', '-euc', command, '-']) + + def ipmi_pxe_reboot_target(self): + if any(env_var.startswith('IPMI_') for env_var in os.environ): + # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST and PXEBOOT_VLAN + default = textwrap.dedent('''\ + set -- ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" + "$@" chassis bootdev pxe + "$@" chassis power reset + ''') + else: + default = textwrap.dedent('''\ + while true; do + echo Please reboot the target in PXE mode, then\\ + enter \\"pxe-booted\\" + read + if [ "$REPLY" = pxe-booted ]; then + break + fi + done + ''') + command = os.environ.get('PXEBOOT_PXE_REBOOT_COMMAND', default) + subprocess.check_call(['sh', '-euc', command, '-']) + + def wait_for_target_to_install(self): + command = os.environ.get( + 'PXEBOOT_WAIT_INSTALL_COMMAND', + textwrap.dedent('''\ + while true; do + echo Please wait for the system to install, then \\ + enter \\"installed\\" + read + if [ "$REPLY" = installed ]; then + break + fi + done + ''')) + subprocess.check_call(['sh', '-euc', command, '-']) + + def ipmi_unset_target_vlan(self): + if any(env_var.startswith('IPMI_') for env_var in os.environ): + # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST + default = textwrap.dedent('''\ + ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\ + lan set 1 vlan id off + ''') + else: + default = textwrap.dedent('''\ + while true; do + echo Please reset the target\\'s vlan, \\ + then enter \\"unvlanned\\" + read + if [ "$REPLY" = unvlanned ]; then + break + fi + done + ''') + command = os.environ.get('PXEBOOT_UNSET_VLAN_COMMAND', default) + subprocess.check_call(['sh', '-euc', command, '-']) + + def ipmi_reboot_target(self): + if any(env_var.startswith('IPMI_') for env_var in os.environ): + # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST + default = textwrap.dedent('''\ + ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\ + chassis power reset + ''') + else: + default = textwrap.dedent('''\ + while true; do + echo Please reboot the target, then\\ + enter \\"rebooted\\" + read + if [ "$REPLY" = rebooted ]; then + break + fi + done + ''') + command = os.environ.get('PXEBOOT_REBOOT_COMMAND', default) + subprocess.check_call(['sh', '-euc', command, '-']) + + def process_args(self, (temp_root, macaddr)): + interface = os.environ.get('PXEBOOT_DEPLOYER_INTERFACE', None) + target_interface = os.environ.get('PXEBOOT_TARGET_INTERFACE', None) + vlan = os.environ.get('PXEBOOT_VLAN') + if vlan is not None: vlan = int(vlan) + mode = os.environ.get('PXEBOOT_MODE') + if mode is None: + if interface: + if vlan is not None: + mode = 'spawn-vlan' + else: + if 'PXEBOOT_CONFIG_TFTP_ADDRESS' in os.environ: + mode = 'existing-dhcp' + else: + mode = 'spawn-novlan' + else: + mode = 'existing-server' + assert mode in ('spawn-vlan', 'spawn-novlan', 'existing-dhcp', + 'existing-server') + if mode == 'spawn-vlan': + with self.vlan(interface=interface, vlan=vlan) \ + as (host_ip, target_ip, broadcast_ip), \ + self.tftp_server(host_ip=host_ip, interface=interface) \ + as (tftp_port, tftproot), \ + self.local_pxelinux(tftproot=tftproot), \ + self.local_kernel(rootfs=temp_root, tftproot=tftproot), \ + self.local_nfsroot(rootfs=temp_root, target_ip=target_ip), \ + self.local_pxeboot_config(tftproot=tftproot, macaddr=macaddr, + device=target_interface, + ip=host_ip, 
tftp_port=tftp_port, + nfsroot_dir=temp_root), \ + self.dhcp_server(interface=interface, host_ip=host_ip, + target_ip=target_ip, + broadcast_ip=broadcast_ip): + self.ipmi_set_target_vlan() + self.ipmi_pxe_reboot_target() + self.wait_for_target_to_install() + self.ipmi_unset_target_vlan() + self.ipmi_reboot_target() + elif mode == 'spawn-novlan': + with self.static_ip(interface=interface) as (host_ip, target_ip, + broadcast_ip), \ + self.tftp_server(host_ip=host_ip, interface=interface, + tftp_port=69) \ + as (tftp_port, tftproot), \ + self.local_pxelinux(tftproot=tftproot), \ + self.local_kernel(rootfs=temp_root, tftproot=tftproot), \ + self.local_nfsroot(rootfs=temp_root, target_ip=target_ip), \ + self.local_pxeboot_config(tftproot=tftproot, macaddr=macaddr, + device=target_interface, + ip=host_ip, tftp_port=tftp_port, + nfsroot_dir=temp_root), \ + self.dhcp_server(interface=interface, host_ip=host_ip, + target_ip=target_ip, + broadcast_ip=broadcast_ip): + self.ipmi_pxe_reboot_target() + self.wait_for_target_to_install() + self.ipmi_reboot_target() + elif mode == 'existing-dhcp': + ip = self.get_interface_ip(interface) + config_tftpaddr = os.environ['PXEBOOT_CONFIG_TFTP_ADDRESS'] + with self.tftp_server(ip=ip, interface=interface, tftp_port=69) \ + as (tftp_port, tftproot), \ + self.local_kernel(rootfs=temp_root, tftproot=tftproot), \ + self.local_nfsroot(rootfs=temp_root, client_ip=''): + kernel_tftproot = 'tftp://{}:{}/'.format(ip, tftp_port) + rootfs_nfsroot = '{}:{}'.format(ip, temp_root) + with self.remote_pxeboot_config( + tftproot=config_tftpaddr, + kernel_tftproot=kernel_tftproot, + kernel_subpath='kernel', + rootfs_nfsroot=nfsroot, + rootfs_subpath='', + macaddr=macaddr): + self.ipmi_pxe_reboot_target() + self.wait_for_target_to_install() + self.ipmi_reboot_target() + elif mode == 'existing-server': + config_tftpaddr = os.environ[ 'PXEBOOT_CONFIG_TFTP_ADDRESS'] + kernel_tftpaddr = os.environ.get('PXEBOOT_KERNEL_TFTP_ADDRESS', + config_tftpaddr) + url = urlparse.urlsplit(kernel_tftpaddr) + kernel_tftproot = os.environ.get('PXEBOOT_KERNEL_TFTP_ROOT', + 'tftp://%s/%s' % (url.hostname, + url.path)) + rootfs_rsync = os.environ['PXEBOOT_ROOTFS_RSYNC_ADDRESS'] + url = urlparse.urlsplit(rootfs_rsync) + nfsroot = os.environ.get('PXEBOOT_ROOTFS_NFSROOT', + '%s:%s' % (url.hostname, url.path)) + with self.remote_kernel(rootfs=temp_root, tftp_url=kernel_tftpaddr, + macaddr=macaddr) as kernel_subpath, \ + self.remote_fdt(rootfs=temp_root, tftp_url=kernel_tftpaddr, + macaddr=macaddr) as fdt_subpath, \ + self.remote_nfsroot(rootfs=temp_root, rsync_url=rootfs_rsync, \ + macaddr=macaddr) as rootfs_subpath, \ + self.remote_pxeboot_config(tftproot=config_tftpaddr, + kernel_tftproot=kernel_tftproot, + kernel_subpath=kernel_subpath, + fdt_subpath=fdt_subpath, + rootfs_nfsroot=nfsroot, + rootfs_subpath=rootfs_subpath, + macaddr=macaddr): + persist = os.environ.get('PXE_INSTALLER') in ('no', 'False') + if not persist: + self.ipmi_pxe_reboot_target() + self.wait_for_target_to_install() + self.ipmi_reboot_target() + else: + cliapp.AppException('Invalid PXEBOOT_MODE: %s' % mode) + +PXEBoot().run() diff --git a/extensions/pxeboot.write.help b/extensions/pxeboot.write.help new file mode 100644 index 00000000..7cb78bce --- /dev/null +++ b/extensions/pxeboot.write.help @@ -0,0 +1,166 @@ +help: > + pxeboot.write extension. + + + This write extension will serve your generated system over NFS to + the target system. 
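 The deployment is normally driven from a cluster morphology. As a rough, untested sketch (the system morph, deployment name, MAC address and variable values below are invented, and it is assumed that the deployment `type` simply names this extension), an entry using it might look like:

     systems:
     - morph: systems/devel-system-x86_64-generic.morph
       deploy:
         pxe-target:
           type: pxeboot
           location: 00:11:22:33:44:55
           PXEBOOT_DEPLOYER_INTERFACE: eth1
           PXEBOOT_VLAN: "42"

 Here `location` is the MAC address of the target, as explained below.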
+ + In all modes `location` is the mac address of the interface that + the target will PXE boot from. This is used so that the target will + load the configuration file appropriate to it. + + + # `PXEBOOT_MODE` + + + It has 4 modes, which can be specified with PXEBOOT_MODE, or inferred + from which parameters are passed: + + + ## spawn-vlan + + + Specify PXEBOOT_DEPLOYER_INTERFACE and PXEBOOT_VLAN to configure + the target to pxeboot on a vlan and spawn a dhcp, nfs and tftp + server. This is potentially the fastest, since it doesn't need to + copy data to other servers. + + This will create a vlan interface for the interface specified in + PXEBOOT_DEPLOYER_INTERFACE and spawn a dhcp server which serves + pxelinux.0, a configuration file and a kernel image from itself. + + The configuration file informs the target to boot with a kernel + command-line that uses an NFS root served from the deployment host. + + + ## spawn-novlan + + + Specify PXEBOOT_DEPLOYER_INTERFACE without PXEBOOT_VLAN to configure + like `spawn-vlan`, but without creating the vlan interface. + + This assumes that you have exclusive access to the interface, such + as if you're plugged in to the device directly, or your interface + is vlanned by your infrastructure team. + + This is required if you are serving from a VM and bridging it to the + correct network via macvtap. For this to work, you need to macvtap + bridge to a pre-vlanned interface on your host machine. + + + ## existing-dhcp + + + Specify PXEBOOT_DEPLOYER_INTERFACE and PXEBOOT_CONFIG_TFTP_ADDRESS + to put config on an existing tftp server, already configured by the + dhcp server. + + This spawns a tftp server and configures the local nfs server, but + doesn't spawn a dhcp server. This is useful if you have already got a + dhcp server that serves PXE images. + + PXEBOOT_CONFIG_TFTP_ADDRESS is a URL in the form `sftp://$HOST/$PATH`. + The configuration file is copied to `$PATH/pxelinux.cfg/` on the + target identified by `$HOST`. + + + ## existing-server + + + Specify at least PXEBOOT_CONFIG_TFTP_ADDRESS and + PXEBOOT_ROOTFS_RSYNC_ADDRESS to specify existing servers to copy + config, kernels and the rootfs to. + + Configuration is copied to the target as `existing-dhcp`. + + Specify PXEBOOT_KERNEL_TFTP_ADDRESS if the tftp server that the + kernel must be downloaded from is different to that of the pxelinux + configuration file. + + PXEBOOT_ROOTFS_RSYNC_ADDRESS is a rsync URL describing where to copy + nfsroots to where they will be exported by the NFS server. + + Specify PXEBOOT_ROOTFS_NFSROOT if the nfsroot appears as a different + address from the target's perspective. + + + # IPMI commands + + + After the PXE boot has been set up, the target needs to be rebooted + in PXE mode. + + If the target is IPMI enabled, you can set `IPMI_USER`, `IPMI_HOST` + and `IPMI_PASSWORD` to make it reboot the target into netboot mode + automatically. + + If they are not specified, then instructions will be displayed, and + `pxeboot.write` will wait for you to finish. + + If there are command-line automation tools for rebooting the target + in netboot mode, then appropriate commands can be defined in the + following variables. + + + ## PXEBOOT_PXE_REBOOT_COMMAND + + + This command will be used to reboot the target device with its boot + device set to PXE boot. + + + ## PXEBOOT_REBOOT_COMMAND + + + This command will be used to reboot the target device in its default + boot mode. 
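 For illustration only: if your lab exposes power control through some command-line tool (`rackctl` and `my-target` below are invented stand-ins), these variables could be set in the deployment environment along the following lines. The extension runs each command string with `sh -euc`, so plain shell snippets work, and multi-line values can be given as YAML block scalars:

     PXEBOOT_PXE_REBOOT_COMMAND: |
       rackctl --host my-target bootdev pxe
       rackctl --host my-target power reset
     PXEBOOT_REBOOT_COMMAND: rackctl --host my-target power reset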
+ + + ## PXEBOOT_WAIT_INSTALL_COMMAND + + + If it is possible for the target to notify you that it has finished + installing, you can put a command in here to wait for the event. + + + # Misc + + + ## KERNEL_ARGS + + + Additional kernel command line options. Note that the following + options + + `root=/dev/nfs ip=dhcp nfsroot=$NFSROOT` + + are implicitly added by the extension. + + + ## DTB_PATH + + + Location in the deployed root filesystem of the Flattened Device + Tree blob (FDT) to use. + + + ## PXE_INSTALLER + + + If set to `no`, `False` or any other YAML value for false, the + remotely installed rootfs, kernel, bootloader config file and + device tree blob if specified, will not be removed after the + deployment finishes. This variable is only meaningful in the + `existing-server` mode. + + + ## PXEBOOT_TARGET_INTERFACE + + Name of the interface of the target to pxeboot from. Some targets + with more than one interface try to get the rootfs from a different + interface than the interface from where the pxeboot server is + reachable. Using this variable, the kernel arguments will be filled + to include the device. + + Note that the name of this interface is the kernel's default name, + usually called ethX, and is non-deterministic. diff --git a/extensions/sdk.write b/extensions/sdk.write new file mode 100755 index 00000000..8d3d2a63 --- /dev/null +++ b/extensions/sdk.write @@ -0,0 +1,284 @@ +#!/bin/sh +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# =*= License: GPL-2 =*= + +set -eu + +die(){ + echo "$@" >&2 + exit 1 +} + +shellescape(){ + echo "'$(echo "$1" | sed -e "s/'/'\\''/g")'" +} + +########################## END OF COMMON HEADER ############################### +# +# The above lines, as well as being part of this script, are copied into the +# self-installing SDK blob's header script, as a means of re-using content. +# + +help(){ + cat <>"$OUTPUT_SCRIPT" <>"$OUTPUT_SCRIPT" <<'EOF' +########################### START OF HEADER SCRIPT ############################ + +usage(){ + cat <&2 + usage >&2 + exit 1 +fi + +TOOLCHAIN_PATH="$(readlink -f "$1")" + +sedescape(){ + # Escape the passed in string so it can be safely interpolated into + # a sed expression as a literal value. + echo "$1" | sed -e 's/[\/&]/\\&/g' +} + +prepend_to_path_elements(){ + # Prepend $1 to every entry in the : separated list specified as $2.
+ local prefix="$1" + ( + # Split path into components + IFS=: + set -- $2 + # Print path back out with new prefix + printf %s "$prefix/$1" + shift + for arg in "$@"; do + printf ":%s" "$prefix/$arg" + done + ) +} + +extract_rootfs(){ + # Extract the bzipped tarball at the end of the script passed as $1 + # to the path specified as $2 + local selfextractor="$1" + local target="$2" + local script_end="$(($(\ + grep -aEn -m1 -e '^#+ END OF HEADER SCRIPT #+$' "$selfextractor" | + cut -d: -f1) + 1 ))" + mkdir -p "$target" + tail -n +"$script_end" "$selfextractor" | tar -xj -C "$target" . +} + +amend_text_file_paths(){ + # Replace all instances of $3 with $4 in the directory specified by $1 + # excluding the subdirectory $2 + local root="$1" + local inner_sysroot="$2" + local old_prefix="$3" + local new_prefix="$4" + find "$root" \( -path "$inner_sysroot" -prune \) -o -type f \ + -exec sh -c 'file "$1" | grep -q text' - {} \; \ + -exec sed -i -e \ + "s/$(sedescape "$old_prefix")/$(sedescape "$new_prefix")/g" {} + +} + +filter_patchelf_errors(){ + # Filter out warnings from patchelf that are acceptable + # The warning that it's making a file bigger is just noise + # The warning about not being an ELF executable just means we got a + # false positive from file that it was an ELF binary + # Failing to find .interp is because for convenience, we set the + # interpreter in the same command as setting the rpath, even though + # we give it both executables and libraries. + grep -v -e 'warning: working around a Linux kernel bug' \ + -e 'not an ELF executable' \ + -e 'cannot find section .interp' +} + +patch_elves(){ + # Set the interpreter and library paths of ELF binaries in $1, + # except for the $2 subdirectory, using the patchelf command in the + # toolchain specified as $3, so that it uses the linker specified + # as $4 as the interpreter, and the runtime path specified by $5. + # + # The patchelf inside the toolchain is used to ensure that it works + # independently of the availability of patchelf on the host. + # + # This is possible by invoking the linker directly and specifying + # --linker-path as the RPATH we want to set the binaries to use. + local root="$1" + local inner_sysroot="$2" + local patchelf="$3" + local linker="$4" + local lib_path="$5" + find "$root" \( -path "$inner_sysroot" -prune \) -o -type f \ + -type f -perm +u=x \ + -exec sh -c 'file "$1" | grep -q "ELF"' - {} \; \ + -exec "$linker" --library-path "$lib_path" \ + "$patchelf" --set-interpreter "$linker" \ + --set-rpath "$lib_path" {} \; 2>&1 \ + | filter_patchelf_errors +} + +generate_environment_setup(){ + local target="$1" + install -m 644 -D /dev/stdin "$target" <>"$OUTPUT_SCRIPT" . diff --git a/extensions/strip-gplv3.configure b/extensions/strip-gplv3.configure new file mode 100755 index 00000000..c08061ad --- /dev/null +++ b/extensions/strip-gplv3.configure @@ -0,0 +1,101 @@ +#!/usr/bin/python +# Copyright (C) 2013 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +''' A Morph configuration extension for removing gplv3 chunks from a system + +Using a hard-coded list of chunks, it will read the system's /baserock metadata +to find the files created by that chunk, then remove them. + +''' + +import cliapp +import re +import os +import json + +class StripGPLv3ConfigureExtension(cliapp.Application): + gplv3_chunks = [ + ['autoconf', ''], + ['automake', ''], + ['bash', ''], + ['binutils', ''], + ['bison', ''], + ['ccache', ''], + ['cmake', ''], + ['flex', ''], + ['gawk', ''], + ['gcc', r'^.*lib.*\.so(\.\d+)*$'], + ['gdbm', ''], + ['gettext', ''], + ['gperf', ''], + ['groff', ''], + ['libtool', r'^.*lib.*\.so(\.\d+)*$'], + ['m4', ''], + ['make', ''], + ['nano', ''], + ['patch', ''], + ['rsync', ''], + ['texinfo-tarball', ''], + ] + + def process_args(self, args): + target_root = args[0] + meta_dir = os.path.join(target_root, 'baserock') + + for chunk in self.gplv3_chunks: + regex = os.path.join(meta_dir, "%s-[^-]\+\.meta" % chunk[0]) + artifacts = self.runcmd(['find', meta_dir, '-regex', regex]) + + for artifact in artifacts.split(): + self.remove_chunk(target_root, artifact, chunk[1]) + + os.symlink(os.path.join(os.sep, 'bin', 'busybox'), + os.path.join(target_root, 'usr', 'bin', 'awk')) + + def remove_chunk(self, target_root, chunk, pattern): + chunk_meta_path = os.path.join(target_root, 'baserock', chunk) + + with open(chunk_meta_path, 'r') as f: + chunk_meta_data = json.load(f) + + if not 'contents' in chunk_meta_data: + raise cliapp.AppError('Chunk %s does not have a "contents" list' + % chunk) + updated_contents = [] + for content_entry in reversed(chunk_meta_data['contents']): + pat = re.compile(pattern) + if len(pattern) == 0 or not pat.match(content_entry): + self.remove_content_entry(target_root, content_entry) + else: + updated_contents.append(content_entry) + + def remove_content_entry(self, target_root, content_entry): + entry_path = os.path.join(target_root, './' + content_entry) + if not entry_path.startswith(target_root): + raise cliapp.AppException('%s is not in %s' + % (entry_path, target_root)) + if os.path.exists(entry_path): + if os.path.islink(entry_path): + os.unlink(entry_path) + elif os.path.isfile(entry_path): + os.remove(entry_path) + elif os.path.isdir(entry_path): + if not os.listdir(entry_path): + os.rmdir(entry_path) + else: + raise cliapp.AppException('%s is not a link, file or directory' + % entry_path) +StripGPLv3ConfigureExtension().run() diff --git a/extensions/swift-build-rings.yml b/extensions/swift-build-rings.yml new file mode 100644 index 00000000..1ffe9c37 --- /dev/null +++ b/extensions/swift-build-rings.yml @@ -0,0 +1,34 @@ +--- +- hosts: localhost + vars: + - rings: + - { name: account, port: 6002 } + - { name: container, port: 6001 } + - { name: object, port: 6000 } + remote_user: root + tasks: + - file: path={{ ansible_env.ROOT }}/etc/swift owner=root group=root state=directory + + - name: Create ring + shell: swift-ring-builder {{ item.name }}.builder create {{ ansible_env.SWIFT_PART_POWER }} + {{ ansible_env.SWIFT_REPLICAS }} {{ ansible_env.SWIFT_MIN_PART_HOURS }} + with_items: rings + + - name: Add each storage node to the ring + shell: swift-ring-builder {{ item[0].name }}.builder + add r1z1-{{ item[1].ip }}:{{ item[0].port }}/{{ item[1].device }} {{ item[1].weight }} + with_nested: + - rings 
+ - ansible_env.SWIFT_STORAGE_DEVICES + + - name: Rebalance the ring + shell: swift-ring-builder {{ item.name }}.builder rebalance {{ ansible_env.SWIFT_REBALANCE_SEED }} + with_items: rings + + - name: Copy ring configuration files into place + copy: src={{ item.name }}.ring.gz dest={{ ansible_env.ROOT }}/etc/swift + with_items: rings + + - name: Copy ring builder files into place + copy: src={{ item.name }}.builder dest={{ ansible_env.ROOT }}/etc/swift + with_items: rings diff --git a/extensions/swift-storage-devices-validate.py b/extensions/swift-storage-devices-validate.py new file mode 100755 index 00000000..57ab23d0 --- /dev/null +++ b/extensions/swift-storage-devices-validate.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright © 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# This is used by the openstack-swift.configure extension +# to validate any provided storage device specifiers +# under SWIFT_STORAGE_DEVICES +# + + +''' + This is used by the swift-storage.configure extension + to validate any storage device specifiers specified + in the SWIFT_STORAGE_DEVICES environment variable +''' + +from __future__ import print_function + +import yaml +import sys + +EXAMPLE_DEVSPEC = '{device: sdb1, ip: 127.0.0.1, weight: 100}' +REQUIRED_KEYS = ['ip', 'device', 'weight'] + +def err(msg): + print(msg, file=sys.stderr) + sys.exit(1) + +if len(sys.argv) != 2: + err('usage: %s STRING_TO_BE_VALIDATED' % sys.argv[0]) + +swift_storage_devices = yaml.load(sys.argv[1]) + +if not isinstance(swift_storage_devices, list): + err('Expected list of device specifiers\n' + 'Example: [%s]' % EXAMPLE_DEVSPEC) + +for d in swift_storage_devices: + if not isinstance(d, dict): + err("Invalid device specifier: `%s'\n" + 'Device specifier must be a dictionary\n' + 'Example: %s' % (d, EXAMPLE_DEVSPEC)) + + if set(d.keys()) != set(REQUIRED_KEYS): + err("Invalid device specifier: `%s'\n" + 'Specifier should contain: %s\n' + 'Example: %s' % (d, str(REQUIRED_KEYS)[1:-1], EXAMPLE_DEVSPEC)) diff --git a/extensions/swift-storage.configure b/extensions/swift-storage.configure new file mode 100644 index 00000000..391b392a --- /dev/null +++ b/extensions/swift-storage.configure @@ -0,0 +1,107 @@ +#!/bin/bash +# +# Copyright © 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . 
+ +set -e + +# The ansible script needs to know where the rootfs is, so we export it here +export ROOT="$1" + +validate_number() { + local name="$1" + local value="$2" + + local pattern='^[0-9]+$' + if ! [[ $value =~ $pattern ]] + then + echo "'$name' must be a number" >&2 + exit 1 + fi +} + +validate_non_empty() { + local name="$1" + local value="$2" + + if [[ $value = None ]] + then + echo "'$name' cannot be empty" >&2 + exit 1 + fi +} + +MANDATORY_OPTIONS="SWIFT_HASH_PATH_PREFIX \ + SWIFT_HASH_PATH_SUFFIX \ + SWIFT_REBALANCE_SEED \ + SWIFT_PART_POWER \ + SWIFT_REPLICAS \ + SWIFT_MIN_PART_HOURS \ + SWIFT_STORAGE_DEVICES \ + CONTROLLER_HOST_ADDRESS \ + MANAGEMENT_INTERFACE_IP_ADDRESS" + +for option in $MANDATORY_OPTIONS +do + if ! [[ -v $option ]] + then + missing_option=True + echo "Required option $option isn't set!" >&2 + fi +done + +if [[ $missing_option = True ]]; then exit 1; fi + +./swift-storage-devices-validate.py "$SWIFT_STORAGE_DEVICES" + +# Validate SWIFT_PART_POWER, SWIFT_REPLICAS, SWIFT_MIN_PART_HOURS +# just make sure they're numbers + +validate_number "SWIFT_PART_POWER" "$SWIFT_PART_POWER" +validate_number "SWIFT_REPLICAS" "$SWIFT_REPLICAS" +validate_number "SWIFT_MIN_PART_HOURS" "$SWIFT_MIN_PART_HOURS" + +# Make sure these aren't empty +validate_non_empty "SWIFT_HASH_PATH_PREFIX" "$SWIFT_HASH_PATH_PREFIX" +validate_non_empty "SWIFT_HASH_PATH_SUFFIX" "$SWIFT_HASH_PATH_SUFFIX" +validate_non_empty "SWIFT_REBALANCE_SEED" "$SWIFT_REBALANCE_SEED" +validate_non_empty "CONTROLLER_HOST_ADDRESS" "$CONTROLLER_HOST_ADDRESS" +validate_non_empty "MANAGEMENT_INTERFACE_IP_ADDRESS" "$MANAGEMENT_INTERFACE_IP_ADDRESS" + +mkdir -p "$ROOT/usr/lib/systemd/system/multi-user.target.wants" # ensure this exists before we make symlinks + +# A swift controller needs the storage setup service +# but does not want any of the other storage services enabled +ln -s "/usr/lib/systemd/system/swift-storage-setup.service" \ + "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-storage-setup.service" + +SWIFT_CONTROLLER=${SWIFT_CONTROLLER:-False} + +if [[ $SWIFT_CONTROLLER = False ]] +then + ln -s "/usr/lib/systemd/system/rsync.service" \ + "$ROOT/usr/lib/systemd/system/multi-user.target.wants/rsync.service" + ln -s "/usr/lib/systemd/system/swift-storage.service" \ + "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-storage.service" +fi + +# Build swift data structures (the rings) +/usr/bin/ansible-playbook -i hosts swift-build-rings.yml + +cat << EOF > "$ROOT"/usr/share/swift/swift-storage-vars.yml +--- +MANAGEMENT_INTERFACE_IP_ADDRESS: $MANAGEMENT_INTERFACE_IP_ADDRESS +SWIFT_HASH_PATH_PREFIX: $SWIFT_HASH_PATH_PREFIX +SWIFT_HASH_PATH_SUFFIX: $SWIFT_HASH_PATH_SUFFIX +EOF diff --git a/extensions/trove.configure b/extensions/trove.configure new file mode 100755 index 00000000..f823762c --- /dev/null +++ b/extensions/trove.configure @@ -0,0 +1,148 @@ +#!/bin/sh +# +# Copyright (C) 2013 - 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# This is a "morph deploy" configuration extension to fully configure +# a Trove instance at deployment time. It uses the following variables +# from the environment (run `morph help trove.configure` to see a description +# of them): +# +# * TROVE_ID +# * TROVE_HOSTNAME (optional, defaults to TROVE_ID) +# * TROVE_COMPANY +# * LORRY_SSH_KEY +# * UPSTREAM_TROVE +# * UPSTREAM_TROVE_PROTOCOL +# * TROVE_ADMIN_USER +# * TROVE_ADMIN_EMAIL +# * TROVE_ADMIN_NAME +# * TROVE_ADMIN_SSH_PUBKEY +# * LORRY_CONTROLLER_MINIONS (optional, defaults to 4) +# * TROVE_BACKUP_KEYS - a space-separated list of paths to SSH keys. +# (optional) +# * TROVE_GENERIC (optional) +# +# The configuration of a Trove is slightly tricky: part of it has to +# be run on the configured system after it has booted. We accomplish +# this by copying in all the relevant data to the target system +# (in /var/lib/trove-setup), and creating a systemd unit file that +# runs on the first boot. The first boot will be detected by the +# existence of the /var/lib/trove-setup/needed file. + +set -e + +if [ "$TROVE_GENERIC" ] +then + echo "Not configuring the trove, it will be generic" + exit 0 +fi + + +# Check that all the variables needed are present: + +error_vars=false +if test "x$TROVE_ID" = "x"; then + echo "ERROR: TROVE_ID needs to be defined." + error_vars=true +fi + +if test "x$TROVE_COMPANY" = "x"; then + echo "ERROR: TROVE_COMPANY needs to be defined." + error_vars=true +fi + +if test "x$TROVE_ADMIN_USER" = "x"; then + echo "ERROR: TROVE_ADMIN_USER needs to be defined." + error_vars=true +fi + +if test "x$TROVE_ADMIN_NAME" = "x"; then + echo "ERROR: TROVE_ADMIN_NAME needs to be defined." + error_vars=true +fi + +if test "x$TROVE_ADMIN_EMAIL" = "x"; then + echo "ERROR: TROVE_ADMIN_EMAIL needs to be defined." + error_vars=true +fi + +if ! ssh-keygen -lf $LORRY_SSH_KEY > /dev/null 2>&1 +then + echo "ERROR: LORRY_SSH_KEY is not a valid ssh key." + error_vars=true +fi + +if ! ssh-keygen -lf $WORKER_SSH_PUBKEY > /dev/null 2>&1 +then + echo "ERROR: WORKER_SSH_PUBKEY is not a valid ssh key." + error_vars=true +fi + +if ! ssh-keygen -lf $TROVE_ADMIN_SSH_PUBKEY > /dev/null 2>&1 +then + echo "ERROR: TROVE_ADMIN_SSH_PUBKEY is not a valid ssh key."
+ error_vars=true +fi + +if "$error_vars"; then + exit 1 +fi + +ROOT="$1" + + +TROVE_DATA="$ROOT/etc/trove" +mkdir -p "$TROVE_DATA" + +install -m 0600 "$LORRY_SSH_KEY" "$TROVE_DATA/lorry.key" +install -m 0644 "${LORRY_SSH_KEY}.pub" "$TROVE_DATA/lorry.key.pub" +install -m 0644 "$TROVE_ADMIN_SSH_PUBKEY" "$TROVE_DATA/admin.key.pub" +install -m 0644 "$WORKER_SSH_PUBKEY" "$TROVE_DATA/worker.key.pub" + + +python <<'EOF' >"$TROVE_DATA/trove.conf" +import os, sys, yaml + +trove_configuration={ + 'TROVE_ID': os.environ['TROVE_ID'], + 'TROVE_COMPANY': os.environ['TROVE_COMPANY'], + 'TROVE_ADMIN_USER': os.environ['TROVE_ADMIN_USER'], + 'TROVE_ADMIN_EMAIL': os.environ['TROVE_ADMIN_EMAIL'], + 'TROVE_ADMIN_NAME': os.environ['TROVE_ADMIN_NAME'], + 'LORRY_SSH_KEY': '/etc/trove/lorry.key', + 'LORRY_SSH_PUBKEY': '/etc/trove/lorry.key.pub', + 'TROVE_ADMIN_SSH_PUBKEY': '/etc/trove/admin.key.pub', + 'WORKER_SSH_PUBKEY': '/etc/trove/worker.key.pub', +} + + + +optional_keys = ('MASON_ID', 'HOSTNAME', 'TROVE_HOSTNAME', + 'LORRY_CONTROLLER_MINIONS', 'TROVE_BACKUP_KEYS', + 'UPSTREAM_TROVE', 'UPSTREAM_TROVE_PROTOCOL') + +for key in optional_keys: + if key in os.environ: + trove_configuration[key]=os.environ[key] + +yaml.dump(trove_configuration, sys.stdout, default_flow_style=False) +EOF + +if [ -n "$TROVE_BACKUP_KEYS" ]; then + mkdir -p "$TROVE_DATA/backup-keys" + cp -- $TROVE_BACKUP_KEYS "$TROVE_DATA/backup-keys" + echo "TROVE_BACKUP_KEYS: /etc/trove/backup-keys/*" >> "$TROVE_DATA/trove.conf" +fi diff --git a/extensions/trove.configure.help b/extensions/trove.configure.help new file mode 100644 index 00000000..c96bdf74 --- /dev/null +++ b/extensions/trove.configure.help @@ -0,0 +1,126 @@ +help: | + This is a "morph deploy" configuration extension to fully configure + a Trove instance at deployment time. It uses the following + configuration variables: + + * `TROVE_ID` + * `TROVE_HOSTNAME` (optional, defaults to `TROVE_ID`) + * `TROVE_COMPANY` + * `LORRY_SSH_KEY` + * `UPSTREAM_TROVE` + * `TROVE_ADMIN_USER` + * `TROVE_ADMIN_EMAIL` + * `TROVE_ADMIN_NAME` + * `TROVE_ADMIN_SSH_PUBKEY` + * `LORRY_CONTROLLER_MINIONS` (optional, defaults to 4) + * `TROVE_BACKUP_KEYS` - a space-separated list of paths to SSH keys. + (optional) + + The variables are described in more detail below. + + A Trove deployment needs to know the following things: + + * The Trove's ID and public name. + * The Trove's administrator name and access details. + * Private and public SSH keys for the Lorry user on the Trove. + * Which upstream Trove it should be set to mirror upon initial deploy. + + These are specified with the configuration variables described in this + help. + + * `TROVE_GENERIC` -- boolean. If it's true the trove will be generic + and it won't be configured with any of the other variables listed + here. + + * `TROVE_ID` -- the identifier of the Trove. This separates it from + other Troves, and allows mirroring of Troves to happen without local + changes getting overwritten. + + The Trove ID is used in several ways. Any local repositories (those not + mirrored from elsewhere) get created under a prefix that is the ID. + Thus, the local repositories on the `git.baserock.org` Trove, whose + Trove ID is `baserock`, are named + `baserock/baserock/definitions.git` and similar. The ID is used + there twice: first as a prefix and then as a "project name" within + that prefix. There can be more projects under the prefix. 
For + example, there is a `baserock/local-config/lorries.git` repository, + where `local-config` is a separate project from `baserock`. Projects + here are a concept for the Trove's git access control language. + + The Trove ID is also used as the prefix for any branch and tag names + created locally for repositories that are not local. Thus, in the + `delta/linux.git` repository, any local branches would be called + something like `baserock/morph`, instead of just `morph`. The + Trove's git access control prevents normal users from pushing + branches and tags that do not have the Trove ID as the prefix. + + * `TROVE_HOSTNAME` -- the public name of the Trove. This is an + optional setting, and defaults to `TROVE_ID`. The public name is + typically the domain name of the server (e.g., `git.baserock.org`), + but can also be an IP address. This setting is used when Trove needs + to generate URLs that point to itself, such as the `git://` and + `http://` URLs for each git repository that is viewed via the web + interface. + + Note that this is _not_ the system hostname. That is set separately, + with the `HOSTNAME` configuration setting (see the + `set-hostname.configure` extension). + + * `TROVE_COMPANY` -- a description of the organisation that owns the + Trove. This is shown in various parts of the web interface of the + Trove. It is for descriptive purposes only. + + * `LORRY_SSH_KEY` -- ssh key pair that the Trove's Lorry will use to + access an upstream Trove, and to push updates to the Trove's git + server. + + The value is a filename on the system doing the deployment (where + `morph deploy` is run). The file contains the _private_ key, and the + public key is in a file with the `.pub` suffix added to the name. + + The upstream Trove needs to be configured to allow this key to + access it. This configuration does not do that automatically. + + * `UPSTREAM_TROVE` -- public name of the upstream Trove (domain + name or IP address). This is an optional setting. If it's set, + the new Trove will be configured to mirror that Trove. + + * `TROVE_ADMIN_USER`, `TROVE_ADMIN_EMAIL`, `TROVE_ADMIN_NAME`, + `TROVE_ADMIN_SSH_PUBKEY` -- details of the Trove's (initial) + administrator. + + Each Trove needs at least one administrator user, and one is created + upon initial deployment. `TROVE_ADMIN_USER` is the username of the + account to be created, `TROVE_ADMIN_EMAIL` should be the e-mail of + the user, and `TROVE_ADMIN_NAME` is their name. If more + administrators are needed, the initial person should create them + using the usual Gitano commands. + + * `LORRY_CONTROLLER_MINIONS` -- the number of Lorry Controller worker + processes to start. This is an optional setting and defaults to 4. + The more workers are running, the more Lorry jobs can run at the same + time, but the more resources they require. + + * `TROVE_BACKUP_KEYS` -- a space-separated list of paths to SSH keys. + If this is set, the Trove will have a backup user that can be accessed + with rsync using the SSH keys provided.
+ + Example + ------- + + The following set of variables could be used to deploy a Trove instance: + + TROVE_ID: my-trove + TROVE_HOSTNAME: my-trove.example.com + TROVE_COMPANY: My Personal Trove for Me, Myself and I + LORRY_SSH_KEY: my-trove/lorry.key + UPSTREAM_TROVE: git.baserock.org + UPSTREAM_TROVE_USER: my-trove + UPSTREAM_TROVE_EMAIL: my-trove@example.com + TROVE_ADMIN_USER: tomjon + TROVE_ADMIN_EMAIL: tomjon@example.com + TROVE_ADMIN_NAME: Tomjon of Lancre + TROVE_ADMIN_SSH_PUBKEY: my-trove/tomjon.key.pub + + These would be put into the cluster morphology used to do the + deployment. diff --git a/extensions/vagrant.configure b/extensions/vagrant.configure new file mode 100644 index 00000000..abc3ea0c --- /dev/null +++ b/extensions/vagrant.configure @@ -0,0 +1,55 @@ +#!/bin/sh +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +set -e + +ROOT="$1" + +if test "x$VAGRANT" = "x"; then + exit 0 +fi + +for needed in etc/ssh/sshd_config etc/sudoers; do + if ! test -e "$ROOT/$needed"; then + echo >&2 "Unable to find $needed" + echo >&2 "Cannot continue configuring as Vagrant basebox" + exit 1 + fi +done + +# SSH daemon needs to be configured to not use DNS... +sed -i -e's/^\(.*[Uu][Ss][Ee][Dd][Nn][Ss].*\)$/#\1/' "$ROOT/etc/ssh/sshd_config" +echo "UseDNS no" >> "$ROOT/etc/ssh/sshd_config" + +# We need to add a vagrant user with "vagrant" as the password. We're doing this +# manually because chrooting in to run adduser is not really allowed for +# deployment time since we wouldn't be able to run the adduser necessarily. In +# practice for now we'd be able to because we can't deploy raw disks +# cross-platform and expect extlinux to install but we won't, for good +# practice and to highlight this deficiency. +echo 'vagrant:x:1000:1000:Vagrant User:/home/vagrant:/bin/bash' >> "$ROOT/etc/passwd" +echo 'vagrant:/6PTOoWylhw3w:16198:0:99999:7:::' >> "$ROOT/etc/shadow" +echo 'vagrant:x:1000:' >> "$ROOT/etc/group" +mkdir -p "$ROOT/home/vagrant" +chown -R 1000:1000 "$ROOT/home/vagrant" + +# Next, the vagrant user is meant to have sudo access +echo 'vagrant ALL=(ALL) NOPASSWD: ALL' >> "$ROOT/etc/sudoers" + +# And ensure that we get sbin in our path +echo 'PATH="$PATH:/sbin:/usr/sbin"' >> "$ROOT/etc/profile" +echo 'export PATH' >> "$ROOT/etc/profile" + diff --git a/hosts b/hosts deleted file mode 100644 index 5b97818d..00000000 --- a/hosts +++ /dev/null @@ -1 +0,0 @@ -localhost ansible_connection=local diff --git a/image-package-example/README b/image-package-example/README deleted file mode 100644 index c1322f25..00000000 --- a/image-package-example/README +++ /dev/null @@ -1,9 +0,0 @@ -Image package example scripts -============================= - -These are scripts used to create disk images or install the system onto -an existing disk.
- -This is also implemented independently for the rawdisk.write write -extension; see morphlib.writeexts.WriteExtension.create_local_system() -for a similar, python implementation. diff --git a/image-package-example/common.sh.in b/image-package-example/common.sh.in deleted file mode 100644 index 9a7389a7..00000000 --- a/image-package-example/common.sh.in +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/false -# Script library to be used by disk-install.sh and make-disk-image.sh - -status(){ - echo "$@" -} - -info(){ - echo "$@" >&2 -} - -warn(){ - echo "$@" >&2 -} - -extract_rootfs(){ - tar -C "$1" -xf @@ROOTFS_TAR_PATH@@ . -} - -make_disk_image(){ - truncate --size "$1" "$2" -} - -format_disk(){ - local disk="$1" - mkfs.ext4 -F -L rootfs "$disk" -} - -install_fs_config(){ - local mountpoint="$1" - local rootdisk="${2-/dev/vda}" - cat >>"$mountpoint/etc/fstab" <&2 - exit 1 -} - -warn(){ - echo "$@" >&2 -} - -info(){ - echo "$@" >&2 -} - -shellescape(){ - echo "'$(echo "$1" | sed -e "s/'/'\\''/g")'" -} - -sedescape(){ - # Escape the passed in string so it can be safely interpolated into - # a sed expression as a literal value. - echo "$1" | sed -e 's/[\/&]/\\&/g' -} - -ROOTDIR="$1" -OUTPUT_TAR="$2" -td="$(mktemp -d)" -IMAGE_SUBDIR="${IMAGE_SUBDIR-image_files}" -SCRIPT_SUBDIR="${SCRIPT_SUBDIR-tools}" -ROOTFS_TAR="${ROOTFS_TAR-rootfs.tar}" - -# Generate shell snippets that will expand to paths to various resources -# needed by the scripts. -# They expand to a single shell word, so constructs like the following work -# SCRIPT_DIR=@@SCRIPT_DIR@@ -# dd if="$SCRIPT_DIR/mbr" of="$disk" count=1 -# tar -C "$mountpoint" -xf @@ROOTFS_TAR_PATH@@ . -find_script_dir='"$(readlink -f "$(dirname "$0")")"' -image_dir="$find_script_dir/../$(shellescape "$IMAGE_SUBDIR")" -rootfs_tar_path="$image_dir/$(shellescape "$ROOTFS_TAR")" - -install_script(){ - local source_file="$1" - local output_dir="$2" - local target_file="$output_dir/$SCRIPT_SUBDIR/$(basename "$source_file" .in)" - sed -e "s/@@SCRIPT_DIR@@/$(sedescape "$find_script_dir")/g" \ - -e "s/@@IMAGE_DIR@@/$(sedescape "$image_dir")/g" \ - -e "s/@@ROOTFS_TAR_PATH@@/$(sedescape "$rootfs_tar_path")/g" \ - "$source_file" \ - | install -D -m 755 /proc/self/fd/0 "$target_file" -} - -install_scripts(){ - local output_dir="$1" - ( - IFS="${INCLUDE_SCRIPTS_SEPARATOR-:}" - for script in $INCLUDE_SCRIPTS; do - local script_path="$(pwd)/$script" - if [ ! -e "$script_path" ]; then - warn Script "$script" not found, ignoring - continue - fi - install_script "$script" "$output_dir" - done - ) -} - -install_bootloader_blobs(){ - local output_dir="$1" - local image_dir="$output_dir/$IMAGE_SUBDIR" - ( - IFS="${BOOTLOADER_BLOBS_SEPARATOR-:}" - for blob in $BOOTLOADER_BLOBS; do - local blob_path="$ROOTDIR/$blob" - if [ ! -e "$blob_path" ]; then - warn Bootloader blob "$blob" not found, ignoring - continue - fi - install -D -m644 "$blob_path" "$image_dir/$(basename "$blob_path")" - done - ) -} - -# Determine a basename for our directory as the same as our tarball with -# extensions removed. This is needed, since tarball packages usually -# have a base directory of its contents, rather then extracting into the -# current directory. -output_dir="$(basename "$OUTPUT_TAR")" -for ext in .xz .bz2 .gzip .gz .tgz .tar; do - output_dir="${output_dir%$ext}" -done - -info Installing scripts -install_scripts "$td/$output_dir" - -info Installing bootloader blobs -install_bootloader_blobs "$td/$output_dir" - -info Writing rootfs tar to "$IMAGE_SUBDIR/$ROOTFS_TAR" -tar -C "$ROOTDIR" -c . 
\ -| sh -c "${ROOTFS_COMPRESS-cat}" >"$td/$output_dir/$IMAGE_SUBDIR/$ROOTFS_TAR" - -info Writing image package tar to "$OUTPUT_TAR" -tar -C "$td" -c "$output_dir" | sh -c "${OUTPUT_COMPRESS-cat}" >"$OUTPUT_TAR" diff --git a/installer.configure b/installer.configure deleted file mode 100755 index a77dc851..00000000 --- a/installer.configure +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/python -# -# Copyright (C) 2014 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# This is a "morph deploy" configuration extension to configure an installer -# system. It will create the configuration needed in the installer system -# to perform an installation. It uses the following variables from the -# environment: -# -# * INSTALLER_TARGET_STORAGE_DEVICE -# * INSTALLER_ROOTFS_TO_INSTALL -# * INSTALLER_POST_INSTALL_COMMAND (optional, defaults to `reboot -f`) - -import os -import sys -import yaml - -install_config_file = os.path.join(sys.argv[1], 'etc', 'install.conf') - -try: - installer_configuration = { - 'INSTALLER_TARGET_STORAGE_DEVICE': os.environ['INSTALLER_TARGET_STORAGE_DEVICE'], - 'INSTALLER_ROOTFS_TO_INSTALL': os.environ['INSTALLER_ROOTFS_TO_INSTALL'], - } -except KeyError as e: - print "Not configuring as an installer system" - sys.exit(0) - -postinstkey = 'INSTALLER_POST_INSTALL_COMMAND' -installer_configuration[postinstkey] = os.environ.get(postinstkey, 'reboot -f') - -with open(install_config_file, 'w') as f: - f.write( yaml.dump(installer_configuration, default_flow_style=False) ) - -print "Configuration of the installer system in %s" % install_config_file diff --git a/jffs2.write b/jffs2.write deleted file mode 100644 index 46b69a53..00000000 --- a/jffs2.write +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/python -#-*- coding: utf-8 -*- -# Copyright © 2015 Codethink Limited - -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . 
- - -'''A Morph deployment write extension for creating images with jffs2 - as the root filesystem.''' - - -import cliapp -import os - -import morphlib.writeexts - - -class Jffs2WriteExtension(morphlib.writeexts.WriteExtension): - - '''See jffs2.write.help for documentation.''' - - def process_args(self, args): - if len(args) != 2: - raise cliapp.AppException('Wrong number of command line args') - - temp_root, location = args - - try: - self.create_jffs2_system(temp_root, location) - self.status(msg='Disk image has been created at %(location)s', - location = location) - except Exception: - self.status(msg='Failure to deploy system to %(location)s', - location = location) - raise - - def create_jffs2_system(self, temp_root, location): - erase_block = self.get_erase_block_size() - cliapp.runcmd( - ['mkfs.jffs2', '--pad', '--no-cleanmarkers', - '--eraseblock='+erase_block, '-d', temp_root, '-o', location]) - - def get_erase_block_size(self): - erase_block = os.environ.get('ERASE_BLOCK', '') - - if erase_block == '': - raise cliapp.AppException('ERASE_BLOCK was not given') - - if not erase_block.isdigit(): - raise cliapp.AppException('ERASE_BLOCK must be a whole number') - - return erase_block - -Jffs2WriteExtension().run() diff --git a/jffs2.write.help b/jffs2.write.help deleted file mode 100644 index 059a354b..00000000 --- a/jffs2.write.help +++ /dev/null @@ -1,28 +0,0 @@ -#-*- coding: utf-8 -*- -# Copyright © 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, see . - -help: | - - Creates a system produced by Morph build with a jffs2 filesystem and then - writes to an image. To use this extension, the host system must have access - to mkfs.jffs2 which is provided in the mtd-utilities.morph stratum. - - Parameters: - - * location: the pathname of the disk image to be created/upgraded, or the - path to the physical device. - - * ERASE_BLOCK: the erase block size of the target system, which can be - found in '/sys/class/mtd/mtdx/erasesize' diff --git a/mason.configure b/mason.configure deleted file mode 100644 index 1198ebd0..00000000 --- a/mason.configure +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/sh -# -# Copyright (C) 2014 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# This is a "morph deploy" configuration extension to fully configure -# a Mason instance at deployment time. 
It uses the following variables -# from the environment: -# -# * ARTIFACT_CACHE_SERVER -# * MASON_CLUSTER_MORPHOLOGY -# * MASON_DEFINITIONS_REF -# * MASON_DISTBUILD_ARCH -# * MASON_TEST_HOST -# * OPENSTACK_NETWORK_ID -# * TEST_INFRASTRUCTURE_TYPE -# * TROVE_HOST -# * TROVE_ID -# * CONTROLLERHOST - -set -e - -########################################################################## -# Copy Mason files into root filesystem -########################################################################## - -ROOT="$1" - -mkdir -p "$ROOT"/usr/lib/mason -cp mason/mason.sh "$ROOT"/usr/lib/mason/mason.sh -cp mason/mason-report.sh "$ROOT"/usr/lib/mason/mason-report.sh -cp mason/os-init-script "$ROOT"/usr/lib/mason/os-init-script - -cp mason/mason.timer "$ROOT"/etc/systemd/system/mason.timer - -cp mason/mason.service "$ROOT"/etc/systemd/system/mason.service - -########################################################################## -# Set up httpd web server -########################################################################## - -cp mason/httpd.service "$ROOT"/etc/systemd/system/httpd.service - -mkdir -p "$ROOT"/srv/mason - -cat >>"$ROOT"/etc/httpd.conf <"$MASON_DATA/mason.conf" -import os, sys, yaml - -mason_configuration={ - 'ARTIFACT_CACHE_SERVER': os.environ['ARTIFACT_CACHE_SERVER'], - 'MASON_CLUSTER_MORPHOLOGY': os.environ['MASON_CLUSTER_MORPHOLOGY'], - 'MASON_DEFINITIONS_REF': os.environ['MASON_DEFINITIONS_REF'], - 'MASON_DISTBUILD_ARCH': os.environ['MASON_DISTBUILD_ARCH'], - 'MASON_TEST_HOST': os.environ['MASON_TEST_HOST'], - 'OPENSTACK_NETWORK_ID': os.environ['OPENSTACK_NETWORK_ID'], - 'TEST_INFRASTRUCTURE_TYPE': os.environ['TEST_INFRASTRUCTURE_TYPE'], - 'TROVE_ID': os.environ['TROVE_ID'], - 'TROVE_HOST': os.environ['TROVE_HOST'], - 'CONTROLLERHOST': os.environ['CONTROLLERHOST'], -} - -yaml.dump(mason_configuration, sys.stdout, default_flow_style=False) -EOF - -if [ "$TEST_INFRASTRUCTURE_TYPE" = "openstack" ]; then - python <<'EOF' >>"$MASON_DATA/mason.conf" -import os, sys, yaml - -openstack_credentials={ - 'OS_USERNAME': os.environ['OPENSTACK_USER'], - 'OS_TENANT_NAME': os.environ['OPENSTACK_TENANT'], - 'OS_TENANT_ID': os.environ['OPENSTACK_TENANT_ID'], - 'OS_AUTH_URL': os.environ['OPENSTACK_AUTH_URL'], - 'OS_PASSWORD': os.environ['OPENSTACK_PASSWORD'], -} - -yaml.dump(openstack_credentials, sys.stdout, default_flow_style=False) -EOF -fi - -########################################################################## -# Enable services -########################################################################## - -ln -s ../mason.timer "$ROOT"/etc/systemd/system/multi-user.target.wants/mason.timer -ln -s ../httpd.service "$ROOT"/etc/systemd/system/multi-user.target.wants/httpd.service diff --git a/moonshot-kernel.configure b/moonshot-kernel.configure deleted file mode 100644 index 11d01751..00000000 --- a/moonshot-kernel.configure +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/sh -# -# Copyright (C) 2014 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# This is a "morph deploy" configuration extension to convert a plain -# kernel Image to uImage, for an HP Moonshot m400 cartridge - -set -eu - -case "$MOONSHOT_KERNEL" in - True|yes) - echo "Converting kernel image for Moonshot" - mkimage -A arm -O linux -C none -T kernel -a 0x00080000 \ - -e 0x00080000 -n Linux -d "$1/boot/vmlinux" "$1/boot/uImage" - ;; - *) - echo Unrecognised option "$MOONSHOT_KERNEL" to MOONSHOT_KERNEL - exit 1 - ;; -esac diff --git a/nfsboot-server.configure b/nfsboot-server.configure deleted file mode 100755 index 9fb48096..00000000 --- a/nfsboot-server.configure +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/sh -# -# Copyright (C) 2013-2014 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# -# This is a "morph deploy" configuration extension to set up a server for -# booting over nfs and tftp. -set -e - -ROOT="$1" - -########################################################################## - -nfsboot_root=/srv/nfsboot -tftp_root="$nfsboot_root"/tftp -nfs_root="$nfsboot_root"/nfs -mkdir -p "$ROOT$tftp_root" "$ROOT$nfs_root" - -install -D /dev/stdin "$ROOT/usr/lib/systemd/system/nfsboot-tftp.service" <&2 - exit 1 -} - -check_bool(){ - case "$(eval echo \"\$$1\")" in - True|'') - eval "$1=true" - ;; - False) - eval "$1=false" - ;; - *) - unnaceptable "$1" - ;; - esac -} - -########################################################################## -# Check variables -########################################################################## - -check_bool CEILOMETER_ENABLE_CONTROLLER -check_bool CEILOMETER_ENABLE_COMPUTE - -if ! "$CEILOMETER_ENABLE_CONTROLLER" && \ - ! "$CEILOMETER_ENABLE_COMPUTE"; then - exit 0 -fi - -if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \ - -z "$CEILOMETER_SERVICE_USER" -o \ - -z "$CEILOMETER_SERVICE_PASSWORD" -o \ - -z "$CEILOMETER_DB_USER" -o \ - -z "$CEILOMETER_DB_PASSWORD" -o \ - -z "$METERING_SECRET" -o \ - -z "$RABBITMQ_HOST" -o \ - -z "$RABBITMQ_PORT" -o \ - -z "$RABBITMQ_USER" -o \ - -z "$RABBITMQ_PASSWORD" -o \ - -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ - -z "$CONTROLLER_HOST_ADDRESS" ]; then - echo Some options required for Ceilometer were defined, but not all. 
- exit 1 -fi - -###################################### -# Enable relevant openstack services # -###################################### - -if "$CEILOMETER_ENABLE_COMPUTE" || "$CEILOMETER_ENABLE_CONTROLLER"; then - enable openstack-ceilometer-config-setup -fi -if "$CEILOMETER_ENABLE_COMPUTE"; then - enable openstack-ceilometer-compute -fi -if "$CEILOMETER_ENABLE_CONTROLLER"; then - enable openstack-ceilometer-db-setup - enable openstack-ceilometer-api - enable openstack-ceilometer-collector - enable openstack-ceilometer-notification - enable openstack-ceilometer-central - enable openstack-ceilometer-alarm-evaluator - enable openstack-ceilometer-alarm-notifier -fi - -########################################################################## -# Generate configuration file -########################################################################## - -OPENSTACK_DATA="$ROOT/etc/openstack" -mkdir -p "$OPENSTACK_DATA" - -python <<'EOF' >"$OPENSTACK_DATA/ceilometer.conf" -import os, sys, yaml - -ceilometer_configuration={ - 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], - 'CEILOMETER_SERVICE_PASSWORD': os.environ['CEILOMETER_SERVICE_PASSWORD'], - 'CEILOMETER_SERVICE_USER': os.environ['CEILOMETER_SERVICE_USER'], - 'CEILOMETER_DB_USER': os.environ['CEILOMETER_DB_USER'], - 'CEILOMETER_DB_PASSWORD': os.environ['CEILOMETER_DB_PASSWORD'], - 'METERING_SECRET': os.environ['METERING_SECRET'], - 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], - 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], - 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], - 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], - 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], - 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], -} - -yaml.dump(ceilometer_configuration, sys.stdout, default_flow_style=False) -EOF diff --git a/openstack-cinder.configure b/openstack-cinder.configure deleted file mode 100644 index 4c32e11a..00000000 --- a/openstack-cinder.configure +++ /dev/null @@ -1,125 +0,0 @@ -#!/bin/sh - -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - -set -e - -ROOT="$1" - -enable(){ - ln -sf "/usr/lib/systemd/system/$1.service" \ - "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" -} - -unnaceptable(){ - eval echo Unexpected value \$$1 for $1 >&2 - exit 1 -} - -check_bool(){ - case "$(eval echo \"\$$1\")" in - True|'') - eval "$1=true" - ;; - False) - eval "$1=false" - ;; - *) - unnaceptable "$1" - ;; - esac -} - -########################################################################## -# Check variables -########################################################################## - -check_bool CINDER_ENABLE_CONTROLLER -check_bool CINDER_ENABLE_COMPUTE -check_bool CINDER_ENABLE_STORAGE - -if ! "$CINDER_ENABLE_CONTROLLER" && \ - ! "$CINDER_ENABLE_COMPUTE" && \ - ! 
"$CINDER_ENABLE_STORAGE"; then - exit 0 -fi - -if [ -z "$RABBITMQ_HOST" -o \ - -z "$RABBITMQ_PORT" -o \ - -z "$RABBITMQ_USER" -o \ - -z "$RABBITMQ_PASSWORD" -o \ - -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \ - -z "$CINDER_DB_USER" -o \ - -z "$CINDER_DB_PASSWORD" -o \ - -z "$CONTROLLER_HOST_ADDRESS" -o \ - -z "$CINDER_SERVICE_USER" -o \ - -z "$CINDER_SERVICE_PASSWORD" -o \ - -z "$CINDER_DEVICE" -o \ - -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" ]; then - echo Some options required for Cinder were defined, but not all. - exit 1 -fi - -###################################### -# Enable relevant openstack services # -###################################### - -if "$CINDER_ENABLE_COMPUTE" || "$CINDER_ENABLE_STORAGE"; then - enable iscsi-setup - enable target #target.service! - enable iscsid -fi -if "$CINDER_ENABLE_COMPUTE" || "$CINDER_ENABLE_CONTROLLER" || "$CINDER_ENABLE_STORAGE"; then - enable openstack-cinder-config-setup -fi -if "$CINDER_ENABLE_STORAGE"; then - enable openstack-cinder-lv-setup - enable lvm2-lvmetad - enable openstack-cinder-volume - enable openstack-cinder-backup - enable openstack-cinder-scheduler -fi -if "$CINDER_ENABLE_CONTROLLER"; then - enable openstack-cinder-db-setup - enable openstack-cinder-api -fi - -########################################################################## -# Generate configuration file -########################################################################## - -OPENSTACK_DATA="$ROOT/etc/openstack" -mkdir -p "$OPENSTACK_DATA" - -python <<'EOF' >"$OPENSTACK_DATA/cinder.conf" -import os, sys, yaml - -cinder_configuration={ - 'RABBITMQ_HOST':os.environ['RABBITMQ_HOST'], - 'RABBITMQ_PORT':os.environ['RABBITMQ_PORT'], - 'RABBITMQ_USER':os.environ['RABBITMQ_USER'], - 'RABBITMQ_PASSWORD':os.environ['RABBITMQ_PASSWORD'], - 'KEYSTONE_TEMPORARY_ADMIN_TOKEN':os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], - 'CINDER_DB_USER':os.environ['CINDER_DB_USER'], - 'CINDER_DB_PASSWORD':os.environ['CINDER_DB_PASSWORD'], - 'CONTROLLER_HOST_ADDRESS':os.environ['CONTROLLER_HOST_ADDRESS'], - 'CINDER_SERVICE_USER':os.environ['CINDER_SERVICE_USER'], - 'CINDER_SERVICE_PASSWORD':os.environ['CINDER_SERVICE_PASSWORD'], - 'CINDER_DEVICE':os.environ['CINDER_DEVICE'], - 'MANAGEMENT_INTERFACE_IP_ADDRESS':os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], -} - -yaml.dump(cinder_configuration, sys.stdout, default_flow_style=False) -EOF diff --git a/openstack-glance.configure b/openstack-glance.configure deleted file mode 100644 index 5da08895..00000000 --- a/openstack-glance.configure +++ /dev/null @@ -1,101 +0,0 @@ -#!/bin/sh - -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . 
- -set -e - -ROOT="$1" - -enable(){ - ln -sf "/usr/lib/systemd/system/$1.service" \ - "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" -} - -unnaceptable(){ - eval echo Unexpected value \$$1 for $1 >&2 - exit 1 -} - -check_bool(){ - case "$(eval echo \"\$$1\")" in - True|'') - eval "$1=true" - ;; - False) - eval "$1=false" - ;; - *) - unnaceptable "$1" - ;; - esac -} - -########################################################################## -# Check variables -########################################################################## - -check_bool GLANCE_ENABLE_SERVICE - -if ! "$GLANCE_ENABLE_SERVICE"; then - exit 0 -fi - -if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \ - -z "$GLANCE_SERVICE_USER" -o \ - -z "$GLANCE_SERVICE_PASSWORD" -o \ - -z "$GLANCE_DB_USER" -o \ - -z "$GLANCE_DB_PASSWORD" -o \ - -z "$RABBITMQ_HOST" -o \ - -z "$RABBITMQ_PORT" -o \ - -z "$RABBITMQ_USER" -o \ - -z "$RABBITMQ_PASSWORD" -o \ - -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ - -z "$CONTROLLER_HOST_ADDRESS" ]; then - echo Some options required for Glance were defined, but not all. - exit 1 -fi - -###################################### -# Enable relevant openstack services # -###################################### - -enable openstack-glance-setup - -########################################################################## -# Generate configuration file -########################################################################## - -OPENSTACK_DATA="$ROOT/etc/openstack" -mkdir -p "$OPENSTACK_DATA" - -python <<'EOF' >"$OPENSTACK_DATA/glance.conf" -import os, sys, yaml - -glance_configuration={ - 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], - 'GLANCE_SERVICE_PASSWORD': os.environ['GLANCE_SERVICE_PASSWORD'], - 'GLANCE_SERVICE_USER': os.environ['GLANCE_SERVICE_USER'], - 'GLANCE_DB_USER': os.environ['GLANCE_DB_USER'], - 'GLANCE_DB_PASSWORD': os.environ['GLANCE_DB_PASSWORD'], - 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], - 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], - 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], - 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], - 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], - 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], -} - -yaml.dump(glance_configuration, sys.stdout, default_flow_style=False) -EOF diff --git a/openstack-ironic.configure b/openstack-ironic.configure deleted file mode 100644 index 962bbcd1..00000000 --- a/openstack-ironic.configure +++ /dev/null @@ -1,155 +0,0 @@ -#!/bin/sh - -# Copyright (C) 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
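For orientation: each of these .configure extensions is run with the deployment's variables exported in its environment and the mounted target root passed as its only argument, which the script picks up as ROOT="$1". A hand-abbreviated sketch of driving this one directly, with made-up values (the real script also insists on the full set of RABBITMQ_*, database and Keystone variables it checks below):

    export IRONIC_ENABLE_SERVICE=True
    export IRONIC_SERVICE_USER=ironic
    export IRONIC_SERVICE_PASSWORD=example-only
    sh ./openstack-ironic.configure /mnt/deploy-rootfs
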
- -set -e - -ROOT="$1" - -enable(){ - ln -sf "/usr/lib/systemd/system/$1.service" \ - "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" -} - -unnaceptable(){ - eval echo Unexpected value \$$1 for $1 >&2 - exit 1 -} - -check_bool(){ - case "$(eval echo \"\$$1\")" in - True|'') - eval "$1=true" - ;; - False) - eval "$1=false" - ;; - *) - unnaceptable "$1" - ;; - esac -} - -########################################################################## -# Check variables -########################################################################## - -check_bool IRONIC_ENABLE_SERVICE - -if ! "$IRONIC_ENABLE_SERVICE"; then - exit 0 -fi - -if [ -z "$IRONIC_SERVICE_USER" -o \ - -z "$IRONIC_SERVICE_PASSWORD" -o \ - -z "$IRONIC_DB_USER" -o \ - -z "$IRONIC_DB_PASSWORD" -o \ - -z "$RABBITMQ_HOST" -o \ - -z "$RABBITMQ_USER" -o \ - -z "$RABBITMQ_PASSWORD" -o \ - -z "$RABBITMQ_PORT" -o \ - -z "$CONTROLLER_HOST_ADDRESS" -o \ - -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ - -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then - echo Some options required for Ironic were defined, but not all. - exit 1 -fi - -###################################### -# Enable relevant openstack services # -###################################### - -enable openstack-ironic-setup -enable iscsi-setup -enable target #target.service! -enable iscsid - -########################################################################## -# Generate configuration file -########################################################################## - -OPENSTACK_DATA="$ROOT/etc/openstack" -mkdir -p "$OPENSTACK_DATA" - -python <<'EOF' >"$OPENSTACK_DATA/ironic.conf" -import os, sys, yaml - -ironic_configuration={ - 'IRONIC_SERVICE_USER': os.environ['IRONIC_SERVICE_USER'], - 'IRONIC_SERVICE_PASSWORD': os.environ['IRONIC_SERVICE_PASSWORD'], - 'IRONIC_DB_USER': os.environ['IRONIC_DB_USER'], - 'IRONIC_DB_PASSWORD': os.environ['IRONIC_DB_PASSWORD'], - 'RABBITMQ_HOST':os.environ['RABBITMQ_HOST'], - 'RABBITMQ_PORT':os.environ['RABBITMQ_PORT'], - 'RABBITMQ_USER':os.environ['RABBITMQ_USER'], - 'RABBITMQ_PASSWORD':os.environ['RABBITMQ_PASSWORD'], - 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], - 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], - 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], - -} - -yaml.dump(ironic_configuration, sys.stdout, default_flow_style=False) -EOF - -########################################################################## -# Configure the TFTP service # -########################################################################## - -tftp_root="/srv/tftp_root/" # trailing slash is essential -mkdir -p "$ROOT/$tftp_root" - -install -D /dev/stdin -m 644 "$ROOT/usr/lib/systemd/system/tftp-hpa.service" << 'EOF' -[Unit] -Description=tftp service for booting kernels -After=network-online.target -Wants=network-online.target - -[Service] -Type=simple -EnvironmentFile=/etc/tftp-hpa.conf -ExecStart=/usr/sbin/in.tftpd $TFTP_OPTIONS ${TFTP_ROOT} -StandardInput=socket -StandardOutput=inherit -StandardError=journal - -[Install] -WantedBy=multi-user.target -EOF - -install -D /dev/stdin -m 644 "$ROOT/usr/lib/systemd/system/tftp-hpa.socket" << EOF -[Unit] -Description=Tftp server activation socket - -[Socket] -ListenDatagram=$MANAGEMENT_INTERFACE_IP_ADDRESS:69 -FreeBind=yes - -[Install] -WantedBy=sockets.target -EOF - -install -D -m 644 /dev/stdin "$ROOT"/etc/tftp-hpa.conf << EOF -TFTP_ROOT=$tftp_root -TFTP_OPTIONS="-v -v -v -v -v --map-file $tftp_root/map-file" -EOF - 
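Two things worth noting about the TFTP pieces just written out: tftp-hpa.socket and tftp-hpa.service form a systemd socket-activation pair (the socket listens on UDP port 69 of the management address and hands the datagram to in.tftpd via StandardInput=socket), and the map-file installed next rewrites bare filenames and legacy /tftpboot/ paths so that both resolve under $tftp_root. A throwaway sanity check on a deployed root, purely illustrative and not part of the extension:

    ls "$ROOT/usr/lib/systemd/system/tftp-hpa.service" \
       "$ROOT/usr/lib/systemd/system/tftp-hpa.socket" \
       "$ROOT/etc/tftp-hpa.conf" \
       "$ROOT/$tftp_root/map-file" \
       "$ROOT/$tftp_root/pxelinux.0"
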
-install -D /dev/stdin -m 644 "$ROOT/$tftp_root"/map-file << EOF -r ^([^/]) $tftp_root\1 -r ^/tftpboot/ $tftp_root\2 -EOF - -cp "$ROOT"/usr/share/syslinux/pxelinux.0 "$ROOT/$tftp_root" diff --git a/openstack-keystone.configure b/openstack-keystone.configure deleted file mode 100644 index 6b011b14..00000000 --- a/openstack-keystone.configure +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/sh - -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - -set -e - -ROOT="$1" - -enable(){ - ln -sf "/usr/lib/systemd/system/$1.service" \ - "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" -} - -unnaceptable(){ - eval echo Unexpected value \$$1 for $1 >&2 - exit 1 -} - -check_bool(){ - case "$(eval echo \"\$$1\")" in - True|'') - eval "$1=true" - ;; - False) - eval "$1=false" - ;; - *) - unnaceptable "$1" - ;; - esac -} - -########################################################################## -# Check variables -########################################################################## - -check_bool KEYSTONE_ENABLE_SERVICE - -if ! "$KEYSTONE_ENABLE_SERVICE"; then - exit 0 -fi - -if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \ - -z "$KEYSTONE_ADMIN_PASSWORD" -o \ - -z "$KEYSTONE_DB_USER" -o \ - -z "$KEYSTONE_DB_PASSWORD" -o \ - -z "$RABBITMQ_HOST" -o \ - -z "$RABBITMQ_PORT" -o \ - -z "$RABBITMQ_USER" -o \ - -z "$RABBITMQ_PASSWORD" -o \ - -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ - -z "$CONTROLLER_HOST_ADDRESS" ]; then - echo Some options required for Keystone were defined, but not all. 
- exit 1 -fi - -python <<'EOF' -import socket -import sys -import os - -try: - socket.inet_pton(socket.AF_INET, os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS']) -except: - print "Error: MANAGEMENT_INTERFACE_IP_ADDRESS is not a valid IP" - sys.exit(1) -EOF - -###################################### -# Enable relevant openstack services # -###################################### - -enable openstack-keystone-setup -enable openstack-horizon-setup -enable postgres-server-setup - -########################################################################## -# Generate configuration file -########################################################################## - -OPENSTACK_DATA="$ROOT/etc/openstack" -mkdir -p "$OPENSTACK_DATA" - -python <<'EOF' >"$OPENSTACK_DATA/keystone.conf" -import os, sys, yaml - -keystone_configuration={ - 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], - 'KEYSTONE_ADMIN_PASSWORD': os.environ['KEYSTONE_ADMIN_PASSWORD'], - 'KEYSTONE_DB_USER': os.environ['KEYSTONE_DB_USER'], - 'KEYSTONE_DB_PASSWORD': os.environ['KEYSTONE_DB_PASSWORD'], - 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], - 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], - 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], - 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], - 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], - 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], -} - -yaml.dump(keystone_configuration, sys.stdout, default_flow_style=False) -EOF - -python << 'EOF' > "$OPENSTACK_DATA/postgres.conf" -import os, sys, yaml - -postgres_configuration={ - 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], -} - -yaml.dump(postgres_configuration, sys.stdout, default_flow_style=False) -EOF diff --git a/openstack-network.configure b/openstack-network.configure deleted file mode 100644 index 10be5a1c..00000000 --- a/openstack-network.configure +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/sh - -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . 
- -set -e - -ROOT="$1" - -enable(){ - ln -sf "/usr/lib/systemd/system/$1.service" \ - "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" -} - -################### -# Enable services # -################### - -enable openvswitch-setup -enable openstack-network-setup - -########################################################################## -# Generate config variable shell snippet -########################################################################## - -OPENSTACK_DATA="$ROOT/etc/openstack" -mkdir -p "$OPENSTACK_DATA" - -python <<'EOF' >"$OPENSTACK_DATA/network.conf" -import os, sys, yaml - -network_configuration = {} - -optional_keys = ('EXTERNAL_INTERFACE',) - -network_configuration.update((k, os.environ[k]) for k in optional_keys if k in os.environ) - -yaml.dump(network_configuration, sys.stdout, default_flow_style=False) -EOF diff --git a/openstack-neutron.configure b/openstack-neutron.configure deleted file mode 100644 index 210222db..00000000 --- a/openstack-neutron.configure +++ /dev/null @@ -1,138 +0,0 @@ -#!/bin/sh - -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - -set -e - -ROOT="$1" - -enable(){ - ln -sf "/usr/lib/systemd/system/openstack-neutron-$1.service" \ - "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-neutron-$1.service" -} - -unnaceptable(){ - eval echo Unexpected value \$$1 for $1 >&2 - exit 1 -} - -check_bool(){ - case "$(eval echo \"\$$1\")" in - True|'') - eval "$1=true" - ;; - False) - eval "$1=false" - ;; - *) - unnaceptable "$1" - ;; - esac -} - -########################################################################## -# Check variables -########################################################################## - -check_bool NEUTRON_ENABLE_CONTROLLER -check_bool NEUTRON_ENABLE_MANAGER -check_bool NEUTRON_ENABLE_AGENT - -if ! "$NEUTRON_ENABLE_CONTROLLER" && \ - ! "$NEUTRON_ENABLE_MANAGER" && \ - ! "$NEUTRON_ENABLE_AGENT"; then - exit 0 -fi - -if [ -z "$NEUTRON_SERVICE_USER" -o \ - -z "$NEUTRON_SERVICE_PASSWORD" -o \ - -z "$NEUTRON_DB_USER" -o \ - -z "$NEUTRON_DB_PASSWORD" -o \ - -z "$METADATA_PROXY_SHARED_SECRET" -o \ - -z "$NOVA_SERVICE_USER" -o \ - -z "$NOVA_SERVICE_PASSWORD" -o \ - -z "$RABBITMQ_HOST" -o \ - -z "$RABBITMQ_USER" -o \ - -z "$RABBITMQ_PASSWORD" -o \ - -z "$RABBITMQ_PORT" -o \ - -z "$CONTROLLER_HOST_ADDRESS" -o \ - -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ - -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then - echo Some options required for Neutron were defined, but not all. - exit 1 -fi - -############################################# -# Ensure /var/run is an appropriate symlink # -############################################# - -if ! 
link="$(readlink "$ROOT/var/run")" || [ "$link" != ../run ]; then - rm -rf "$ROOT/var/run" - ln -s ../run "$ROOT/var/run" -fi - -################### -# Enable services # -################### - -if "$NEUTRON_ENABLE_CONTROLLER"; then - enable config-setup - enable db-setup - enable server -fi - -if "$NEUTRON_ENABLE_MANAGER"; then - enable config-setup - enable ovs-cleanup - enable dhcp-agent - enable l3-agent - enable plugin-openvswitch-agent - enable metadata-agent -fi - -if "$NEUTRON_ENABLE_AGENT"; then - enable config-setup - enable plugin-openvswitch-agent -fi - -########################################################################## -# Generate config variable shell snippet -########################################################################## - -OPENSTACK_DATA="$ROOT/etc/openstack" -mkdir -p "$OPENSTACK_DATA" - -python <<'EOF' >"$OPENSTACK_DATA/neutron.conf" -import os, sys, yaml - -nova_configuration={ - 'NEUTRON_SERVICE_USER': os.environ['NEUTRON_SERVICE_USER'], - 'NEUTRON_SERVICE_PASSWORD': os.environ['NEUTRON_SERVICE_PASSWORD'], - 'NEUTRON_DB_USER': os.environ['NEUTRON_DB_USER'], - 'NEUTRON_DB_PASSWORD': os.environ['NEUTRON_DB_PASSWORD'], - 'METADATA_PROXY_SHARED_SECRET': os.environ['METADATA_PROXY_SHARED_SECRET'], - 'NOVA_SERVICE_USER': os.environ['NOVA_SERVICE_USER'], - 'NOVA_SERVICE_PASSWORD': os.environ['NOVA_SERVICE_PASSWORD'], - 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], - 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], - 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], - 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], - 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], - 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], - 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], -} - -yaml.dump(nova_configuration, sys.stdout, default_flow_style=False) -EOF diff --git a/openstack-nova.configure b/openstack-nova.configure deleted file mode 100644 index 213f1852..00000000 --- a/openstack-nova.configure +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/sh - -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - -set -e - -ROOT="$1" - -enable(){ - ln -sf "/usr/lib/systemd/system/openstack-nova-$1.service" \ - "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-nova-$1.service" -} - -unnaceptable(){ - eval echo Unexpected value \$$1 for $1 >&2 - exit 1 -} - -check_bool(){ - case "$(eval echo \"\$$1\")" in - True|'') - eval "$1=true" - ;; - False) - eval "$1=false" - ;; - *) - unnaceptable "$1" - ;; - esac -} - -########################################################################## -# Check variables -########################################################################## - -check_bool NOVA_ENABLE_CONTROLLER -check_bool NOVA_ENABLE_COMPUTE - -if ! "$NOVA_ENABLE_CONTROLLER" && \ - ! 
"$NOVA_ENABLE_COMPUTE"; then - exit 0 -fi - -if [ -z "$NOVA_SERVICE_USER" -o \ - -z "$NOVA_SERVICE_PASSWORD" -o \ - -z "$NOVA_DB_USER" -o \ - -z "$NOVA_DB_PASSWORD" -o \ - -z "$NOVA_VIRT_TYPE" -o \ - -z "$NEUTRON_SERVICE_USER" -o \ - -z "$NEUTRON_SERVICE_PASSWORD" -o \ - -z "$IRONIC_SERVICE_USER" -a \ - -z "$IRONIC_SERVICE_PASSWORD" -a \ - -z "$METADATA_PROXY_SHARED_SECRET" -o \ - -z "$RABBITMQ_HOST" -o \ - -z "$RABBITMQ_USER" -o \ - -z "$RABBITMQ_PASSWORD" -o \ - -z "$RABBITMQ_PORT" -o \ - -z "$CONTROLLER_HOST_ADDRESS" -o \ - -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ - -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then - echo Some options required for Nova were defined, but not all. - exit 1 -fi - -############################################### -# Enable libvirtd and libvirt-guests services # -############################################### - -wants_dir="$ROOT"/usr/lib/systemd/system/multi-user.target.wants -mkdir -p "$wants_dir" -mkdir -p "$ROOT"/var/lock/subsys -ln -sf ../libvirtd.service "$wants_dir/libvirtd.service" - -###################################### -# Enable relevant openstack services # -###################################### - -if "$NOVA_ENABLE_CONTROLLER" || "$NOVA_ENABLE_COMPUTE"; then - enable config-setup -fi -if "$NOVA_ENABLE_CONTROLLER" && ! "$NOVA_ENABLE_COMPUTE"; then - enable conductor -fi -if "$NOVA_ENABLE_COMPUTE"; then - enable compute -fi -if "$NOVA_ENABLE_CONTROLLER"; then - for service in db-setup api cert consoleauth novncproxy scheduler serialproxy; do - enable "$service" - done -fi - -########################################################################## -# Change iprange for the interal libvirt to avoid clashes -# with eth0 ip range -########################################################################## - -sed -i "s/192\.168\.122\./192\.168\.1\./g" \ - "$ROOT"/etc/libvirt/qemu/networks/default.xml - - -########################################################################## -# Generate configuration file -########################################################################## - -case "$NOVA_BAREMETAL_SCHEDULING" in - True|true|yes) - export COMPUTE_MANAGER=ironic.nova.compute.manager.ClusteredComputeManager - export RESERVED_HOST_MEMORY_MB=0 - export SCHEDULER_HOST_MANAGER=nova.scheduler.ironic_host_manager.IronicHostManager - export RAM_ALLOCATION_RATIO=1.0 - export COMPUTE_DRIVER=nova.virt.ironic.IronicDriver - ;; - *) - export COMPUTE_MANAGER=nova.compute.manager.ComputeManager - export RESERVED_HOST_MEMORY_MB=512 - export SCHEDULER_HOST_MANAGER=nova.scheduler.host_manager.HostManager - export RAM_ALLOCATION_RATIO=1.5 - export COMPUTE_DRIVER=libvirt.LibvirtDriver - ;; -esac - -OPENSTACK_DATA="$ROOT/etc/openstack" -mkdir -p "$OPENSTACK_DATA" - -python <<'EOF' >"$OPENSTACK_DATA/nova.conf" -import os, sys, yaml - -nova_configuration={ - 'NOVA_SERVICE_USER': os.environ['NOVA_SERVICE_USER'], - 'NOVA_SERVICE_PASSWORD': os.environ['NOVA_SERVICE_PASSWORD'], - 'NOVA_DB_USER': os.environ['NOVA_DB_USER'], - 'NOVA_DB_PASSWORD': os.environ['NOVA_DB_PASSWORD'], - 'NOVA_VIRT_TYPE': os.environ['NOVA_VIRT_TYPE'], - 'COMPUTE_MANAGER': os.environ['COMPUTE_MANAGER'], - 'RESERVED_HOST_MEMORY_MB': os.environ['RESERVED_HOST_MEMORY_MB'], - 'SCHEDULER_HOST_MANAGER': os.environ['SCHEDULER_HOST_MANAGER'], - 'RAM_ALLOCATION_RATIO': os.environ['RAM_ALLOCATION_RATIO'], - 'COMPUTE_DRIVER': os.environ['COMPUTE_DRIVER'], - 'NEUTRON_SERVICE_USER': os.environ['NEUTRON_SERVICE_USER'], - 'NEUTRON_SERVICE_PASSWORD': os.environ['NEUTRON_SERVICE_PASSWORD'], - 'IRONIC_SERVICE_USER': 
os.environ['IRONIC_SERVICE_USER'], - 'IRONIC_SERVICE_PASSWORD': os.environ['IRONIC_SERVICE_PASSWORD'], - 'METADATA_PROXY_SHARED_SECRET': os.environ['METADATA_PROXY_SHARED_SECRET'], - 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], - 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], - 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], - 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], - 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], - 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], - 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], -} - -yaml.dump(nova_configuration, sys.stdout, default_flow_style=False) -EOF diff --git a/openstack-swift-controller.configure b/openstack-swift-controller.configure deleted file mode 100644 index 424ab57b..00000000 --- a/openstack-swift-controller.configure +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash -# -# Copyright © 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - - -set -e - -export ROOT="$1" - -MANDATORY_OPTIONS="SWIFT_ADMIN_PASSWORD KEYSTONE_TEMPORARY_ADMIN_TOKEN" - -for option in $MANDATORY_OPTIONS -do - if ! [[ -v $option ]] - then - missing_option=True - echo "Required option $option isn't set!" >&2 - fi -done - -if [[ $missing_option = True ]]; then exit 1; fi - -mkdir -p "$ROOT/usr/lib/systemd/system/multi-user.target.wants" # ensure this exists before we make symlinks - -ln -s "/usr/lib/systemd/system/swift-controller-setup.service" \ - "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-controller-setup.service" -ln -s "/usr/lib/systemd/system/memcached.service" \ - "$ROOT/usr/lib/systemd/system/multi-user.target.wants/memcached.service" -ln -s "/usr/lib/systemd/system/openstack-swift-proxy.service" \ - "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-proxy.service" - -cat << EOF > "$ROOT"/usr/share/openstack/swift-controller-vars.yml ---- -SWIFT_ADMIN_PASSWORD: $SWIFT_ADMIN_PASSWORD -MANAGEMENT_INTERFACE_IP_ADDRESS: $MANAGEMENT_INTERFACE_IP_ADDRESS -KEYSTONE_TEMPORARY_ADMIN_TOKEN: $KEYSTONE_TEMPORARY_ADMIN_TOKEN -EOF diff --git a/pxeboot.check b/pxeboot.check deleted file mode 100755 index 611708a9..00000000 --- a/pxeboot.check +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/python - -import itertools -import os -import subprocess -import sys -flatten = itertools.chain.from_iterable - -def powerset(iterable): - "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)" - s = list(iterable) - return flatten(itertools.combinations(s, r) for r in range(len(s)+1)) - -valid_option_sets = frozenset(( - ('spawn-novlan', frozenset(('PXEBOOT_DEPLOYER_INTERFACE',))), - ('spawn-vlan', frozenset(('PXEBOOT_DEPLOYER_INTERFACE', 'PXEBOOT_VLAN'))), - ('existing-dhcp', frozenset(('PXEBOOT_DEPLOYER_INTERFACE', - 'PXEBOOT_CONFIG_TFTP_ADDRESS'))), - ('existing-server', frozenset(('PXEBOOT_CONFIG_TFTP_ADDRESS', - 'PXEBOOT_ROOTFS_RSYNC_ADDRESS'))), -)) -valid_modes = frozenset(mode for mode, opt_set in valid_option_sets) - - -def 
compute_matches(env): - complete_matches = set() - for mode, opt_set in valid_option_sets: - if all(k in env for k in opt_set): - complete_matches.add(opt_set) - return complete_matches - -complete_matches = compute_matches(os.environ) - -def word_separate_options(options): - assert options - s = options.pop(-1) - if options: - s = '%s and %s' % (', '.join(options), s) - return s - - -valid_options = frozenset(flatten(opt_set for (mode, opt_set) - in valid_option_sets)) -matched_options = frozenset(o for o in valid_options - if o in os.environ) -if not complete_matches: - addable_sets = frozenset(frozenset(os) - matched_options for os in - valid_options - if frozenset(os) - matched_options) - print('Please provide %s' % ' or '.join( - word_separate_options(list(opt_set)) - for opt_set in addable_sets if opt_set)) - sys.exit(1) -elif len(complete_matches) > 1: - removable_sets = frozenset(matched_options - frozenset(os) for os in - powerset(matched_options) - if len(compute_matches(os)) == 1) - print('WARNING: Following options might not be needed: %s' % ' or '.join( - word_separate_options(list(opt_set)) - for opt_set in removable_sets if opt_set)) - -if 'PXEBOOT_MODE' in os.environ: - mode = os.environ['PXEBOOT_MODE'] -else: - try: - mode, = (mode for (mode, opt_set) in valid_option_sets - if all(o in os.environ for o in opt_set)) - - except ValueError as e: - print ('More than one candidate for PXEBOOT_MODE, please ' - 'set a value for it. Type `morph help pxeboot.write for ' - 'more info') - sys.exit(1) - -if mode not in valid_modes: - print('%s is not a valid PXEBOOT_MODE' % mode) - sys.exit(1) - -if mode != 'existing-server': - with open(os.devnull, 'w') as devnull: - if subprocess.call(['systemctl', 'is-active', 'nfs-server'], - stdout=devnull) != 0: - print ('ERROR: nfs-server.service is not running and is needed ' - 'for this deployment. 
Please, run `systemctl start nfs-server` ' - 'and try `morph deploy` again.') - sys.exit(1) diff --git a/pxeboot.write b/pxeboot.write deleted file mode 100644 index 3a12ebcc..00000000 --- a/pxeboot.write +++ /dev/null @@ -1,755 +0,0 @@ -#!/usr/bin/env python - - -import collections -import contextlib -import errno -import itertools -import logging -import os -import select -import signal -import shutil -import socket -import string -import StringIO -import subprocess -import sys -import tempfile -import textwrap -import urlparse - -import cliapp - -import morphlib - - -def _int_to_quad_dot(i): - return '.'.join(( - str(i >> 24 & 0xff), - str(i >> 16 & 0xff), - str(i >> 8 & 0xff), - str(i & 0xff))) - - -def _quad_dot_to_int(s): - i = 0 - for octet in s.split('.'): - i <<= 8 - i += int(octet, 10) - return i - - -def _netmask_to_prefixlen(mask): - bs = '{:032b}'.format(mask) - prefix = bs.rstrip('0') - if '0' in prefix: - raise ValueError('abnormal netmask: %s' % - _int_to_quad_dot(mask)) - return len(prefix) - - -def _get_routes(): - routes = [] - with open('/proc/net/route', 'r') as f: - for line in list(f)[1:]: - fields = line.split() - destination, flags, mask = fields[1], fields[3], fields[7] - flags = int(flags, 16) - if flags & 2: - # default route, ignore - continue - destination = socket.ntohl(int(destination, 16)) - mask = socket.ntohl(int(mask, 16)) - prefixlen = _netmask_to_prefixlen(mask) - routes.append((destination, prefixlen)) - return routes - - -class IPRange(object): - def __init__(self, prefix, prefixlen): - self.prefixlen = prefixlen - mask = (1 << prefixlen) - 1 - self.mask = mask << (32 - prefixlen) - self.prefix = prefix & self.mask - @property - def bitstring(self): - return ('{:08b}' * 4).format( - self.prefix >> 24 & 0xff, - self.prefix >> 16 & 0xff, - self.prefix >> 8 & 0xff, - self.prefix & 0xff - )[:self.prefixlen] - def startswith(self, other_range): - return self.bitstring.startswith(other_range.bitstring) - - -def find_subnet(valid_ranges, invalid_ranges): - for vr in valid_ranges: - known_subnets = set(ir for ir in invalid_ranges if ir.startswith(vr)) - prefixlens = set(r.prefixlen for r in known_subnets) - prefixlens.add(32 - 2) # need at least 4 addresses in subnet - prefixlen = min(prefixlens) - if prefixlen <= vr.prefixlen: - # valid subnet is full, move on to next - continue - subnetlen = prefixlen - vr.prefixlen - for prefix in (subnetid + vr.prefix - for subnetid in xrange(1 << subnetlen)): - if any(subnet.prefix == prefix for subnet in known_subnets): - continue - return prefix, prefixlen - - -def _normalise_macaddr(macaddr): - '''pxelinux.0 wants the mac address to be lowercase and - separated''' - digits = (c for c in macaddr.lower() if c in string.hexdigits) - nibble_pairs = grouper(digits, 2) - return '-'.join(''.join(byte) for byte in nibble_pairs) - - -@contextlib.contextmanager -def executor(target_pid): - 'Kills a process if its parent dies' - read_fd, write_fd = os.pipe() - helper_pid = os.fork() - if helper_pid == 0: - try: - os.close(write_fd) - while True: - rlist, _, _ = select.select([read_fd], [], []) - if read_fd in rlist: - d = os.read(read_fd, 1) - if not d: - os.kill(target_pid, signal.SIGKILL) - if d in ('', 'Q'): - os._exit(0) - else: - os._exit(1) - except BaseException as e: - import traceback - traceback.print_exc() - os._exit(1) - os.close(read_fd) - yield - os.write(write_fd, 'Q') - os.close(write_fd) - - -def grouper(iterable, n, fillvalue=None): - "Collect data into fixed-length chunks or blocks" - # grouper('ABCDEFG', 3, 
'x') --> ABC DEF Gxx" - args = [iter(iterable)] * n - return itertools.izip_longest(*args, fillvalue=fillvalue) - - -class PXEBoot(morphlib.writeexts.WriteExtension): - @contextlib.contextmanager - def _vlan(self, interface, vlan): - viface = '%s.%s' % (interface, vlan) - self.status(msg='Creating vlan %(viface)s', viface=viface) - subprocess.check_call(['vconfig', 'add', interface, str(vlan)]) - try: - yield viface - finally: - self.status(msg='Destroying vlan %(viface)s', viface=viface) - subprocess.call(['vconfig', 'rem', viface]) - - @contextlib.contextmanager - def _static_ip(self, iface): - valid_ranges = set(( - IPRange(_quad_dot_to_int('192.168.0.0'), 16), - IPRange(_quad_dot_to_int('172.16.0.0'), 12), - IPRange(_quad_dot_to_int('10.0.0.0'), 8), - )) - invalid_ranges = set(IPRange(prefix, prefixlen) - for (prefix, prefixlen) in _get_routes()) - prefix, prefixlen = find_subnet(valid_ranges, invalid_ranges) - netaddr = prefix - dhcp_server_ip = netaddr + 1 - client_ip = netaddr + 2 - broadcast_ip = prefix | ((1 << (32 - prefixlen)) - 1) - self.status(msg='Assigning ip address %(ip)s/%(prefixlen)d to ' - 'iface %(iface)s', - ip=_int_to_quad_dot(dhcp_server_ip), prefixlen=prefixlen, - iface=iface) - subprocess.check_call(['ip', 'addr', 'add', - '{}/{}'.format(_int_to_quad_dot(dhcp_server_ip), - prefixlen), - 'broadcast', _int_to_quad_dot(broadcast_ip), - 'scope', 'global', - 'dev', iface]) - try: - yield (dhcp_server_ip, client_ip, broadcast_ip) - finally: - self.status(msg='Removing ip addresses from iface %(iface)s', - iface=iface) - subprocess.call(['ip', 'addr', 'flush', 'dev', iface]) - - @contextlib.contextmanager - def _up_interface(self, iface): - self.status(msg='Bringing interface %(iface)s up', iface=iface) - subprocess.check_call(['ip', 'link', 'set', iface, 'up']) - try: - yield - finally: - self.status(msg='Bringing interface %(iface)s down', iface=iface) - subprocess.call(['ip', 'link', 'set', iface, 'down']) - - @contextlib.contextmanager - def static_ip(self, interface): - with self._static_ip(iface=interface) as (host_ip, client_ip, - broadcast_ip), \ - self._up_interface(iface=interface): - yield (_int_to_quad_dot(host_ip), - _int_to_quad_dot(client_ip), - _int_to_quad_dot(broadcast_ip)) - - @contextlib.contextmanager - def vlan(self, interface, vlan): - with self._vlan(interface=interface, vlan=vlan) as viface, \ - self.static_ip(interface=viface) \ - as (host_ip, client_ip, broadcast_ip): - yield host_ip, client_ip, broadcast_ip - - @contextlib.contextmanager - def _tempdir(self): - td = tempfile.mkdtemp() - print 'Created tempdir:', td - try: - yield td - finally: - shutil.rmtree(td, ignore_errors=True) - - @contextlib.contextmanager - def _remote_tempdir(self, hostname, template): - persist = os.environ.get('PXE_INSTALLER') in ('no', 'False') - td = cliapp.ssh_runcmd(hostname, ['mktemp', '-d', template]).strip() - try: - yield td - finally: - if not persist: - cliapp.ssh_runcmd(hostname, ['find', td, '-delete']) - - def _serve_tftpd(self, sock, host, port, interface, tftproot): - self.settings.progname = 'tftp server' - self._set_process_name() - while True: - logging.debug('tftpd waiting for connections') - # recvfrom with MSG_PEEK is how you accept UDP connections - _, peer = sock.recvfrom(0, socket.MSG_PEEK) - conn = sock - logging.debug('Connecting socket to peer: ' + repr(peer)) - conn.connect(peer) - # The existing socket is now only serving that peer, so we need to - # bind a new UDP socket to the wildcard address, which needs the - # port to be in 
REUSEADDR mode. - conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - logging.debug('Binding replacement socket to ' + repr((host, port))) - sock.bind((host, port)) - - logging.debug('tftpd server handing connection to tftpd') - tftpd_serve = ['tftpd', '-rl', tftproot] - ret = subprocess.call(args=tftpd_serve, stdin=conn, - stdout=conn, stderr=None, close_fds=True) - # It's handy to turn off REUSEADDR after the rebinding, - # so we can protect against future bind attempts on this port. - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 0) - logging.debug('tftpd exited %d' % ret) - os._exit(0) - - @contextlib.contextmanager - def _spawned_tftp_server(self, tftproot, host_ip, interface, tftp_port=0): - # inetd-style launchers tend to bind UDP ports with SO_REUSEADDR, - # because they need to have multiple ports bound, one for recieving - # all connection attempts on that port, and one for each concurrent - # connection with a peer - # this makes detecting whether there's a tftpd running difficult, so - # we'll instead use an ephemeral port and configure the PXE boot to - # use that tftp server for the kernel - s = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) - s.bind((host_ip, tftp_port)) - host, port = s.getsockname() - self.status(msg='Bound listen socket to %(host)s, %(port)s', - host=host, port=port) - pid = os.fork() - if pid == 0: - try: - self._serve_tftpd(sock=s, host=host, port=port, - interface=interface, tftproot=tftproot) - except BaseException as e: - import traceback - traceback.print_exc() - os._exit(1) - s.close() - with executor(pid): - try: - yield port - finally: - self.status(msg='Killing tftpd listener pid=%(pid)d', - pid=pid) - os.kill(pid, signal.SIGKILL) - - @contextlib.contextmanager - def tftp_server(self, host_ip, interface, tftp_port=0): - with self._tempdir() as tftproot, \ - self._spawned_tftp_server(tftproot=tftproot, host_ip=host_ip, - interface=interface, - tftp_port=tftp_port) as tftp_port: - self.status(msg='Serving tftp root %(tftproot)s, on port %(port)d', - port=tftp_port, tftproot=tftproot) - yield tftp_port, tftproot - - @contextlib.contextmanager - def _local_copy(self, src, dst): - self.status(msg='Installing %(src)s to %(dst)s', - src=src, dst=dst) - shutil.copy2(src=src, dst=dst) - try: - yield - finally: - self.status(msg='Removing %(dst)s', dst=dst) - os.unlink(dst) - - @contextlib.contextmanager - def _local_symlink(self, src, dst): - os.symlink(src, dst) - try: - yield - finally: - os.unlink(dst) - - def local_pxelinux(self, tftproot): - return self._local_copy('/usr/share/syslinux/pxelinux.0', - os.path.join(tftproot, 'pxelinux.0')) - - def local_kernel(self, rootfs, tftproot): - return self._local_copy(os.path.join(rootfs, 'boot/vmlinuz'), - os.path.join(tftproot, 'kernel')) - - @contextlib.contextmanager - def _remote_copy(self, hostname, src, dst): - persist = os.environ.get('PXE_INSTALLER') in ('no', 'False') - with open(src, 'r') as f: - cliapp.ssh_runcmd(hostname, - ['install', '-D', '-m644', '/proc/self/fd/0', - dst], stdin=f, stdout=None, stderr=None) - try: - yield - finally: - if not persist: - cliapp.ssh_runcmd(hostname, ['rm', dst]) - - @contextlib.contextmanager - def _remote_symlink(self, hostname, src, dst): - persist = os.environ.get('PXE_INSTALLER') in ('no', 'False') - cliapp.ssh_runcmd(hostname, - ['ln', '-s', '-f', src, dst], - stdin=None, stdout=None, 
stderr=None) - try: - yield - finally: - if not persist: - cliapp.ssh_runcmd(hostname, ['rm', '-f', dst]) - - @contextlib.contextmanager - def remote_kernel(self, rootfs, tftp_url, macaddr): - for name in ('vmlinuz', 'zImage', 'uImage'): - kernel_path = os.path.join(rootfs, 'boot', name) - if os.path.exists(kernel_path): - break - else: - raise cliapp.AppException('Failed to locate kernel') - url = urlparse.urlsplit(tftp_url) - basename = '{}-kernel'.format(_normalise_macaddr(macaddr)) - target_path = os.path.join(url.path, basename) - with self._remote_copy(hostname=url.hostname, src=kernel_path, - dst=target_path): - yield basename - - @contextlib.contextmanager - def remote_fdt(self, rootfs, tftp_url, macaddr): - fdt_rel_path = os.environ.get('DTB_PATH', '') - if fdt_rel_path == '': - yield - fdt_abs_path = os.path.join(rootfs, fdt_rel_path) - if not fdt_abs_path: - raise cliapp.AppException('Failed to locate Flattened Device Tree') - url = urlparse.urlsplit(tftp_url) - basename = '{}-fdt'.format(_normalise_macaddr(macaddr)) - target_path = os.path.join(url.path, basename) - with self._remote_copy(hostname=url.hostname, src=fdt_abs_path, - dst=target_path): - yield basename - - @contextlib.contextmanager - def local_nfsroot(self, rootfs, target_ip): - nfsroot = target_ip + ':' + rootfs - self.status(msg='Exporting %(nfsroot)s as local nfsroot', - nfsroot=nfsroot) - cliapp.runcmd(['exportfs', '-o', 'ro,insecure,no_root_squash', - nfsroot]) - try: - yield - finally: - self.status(msg='Removing %(nfsroot)s from local nfsroots', - nfsroot=nfsroot) - cliapp.runcmd(['exportfs', '-u', nfsroot]) - - @contextlib.contextmanager - def remote_nfsroot(self, rootfs, rsync_url, macaddr): - url = urlparse.urlsplit(rsync_url) - template = os.path.join(url.path, - _normalise_macaddr(macaddr) + '.XXXXXXXXXX') - with self._remote_tempdir(hostname=url.hostname, template=template) \ - as tempdir: - nfsroot = urlparse.urlunsplit((url.scheme, url.netloc, tempdir, - url.query, url.fragment)) - cliapp.runcmd(['rsync', '-asSPH', '--delete', rootfs, nfsroot], - stdin=None, stdout=open(os.devnull, 'w'), - stderr=None) - yield os.path.join(os.path.basename(tempdir), - os.path.basename(rootfs)) - - @staticmethod - def _write_pxe_config(fh, kernel_tftp_url, rootfs_nfs_url, device=None, - fdt_subpath=None, extra_args=''): - - if device is None: - ip_cfg = "ip=dhcp" - else: - ip_cfg = "ip=:::::{device}:dhcp::".format(device=device) - - fh.write(textwrap.dedent('''\ - DEFAULT default - LABEL default - LINUX {kernel_url} - APPEND root=/dev/nfs {ip_cfg} nfsroot={rootfs_nfs_url} {extra_args} - ''').format(kernel_url=kernel_tftp_url, ip_cfg=ip_cfg, - rootfs_nfs_url=rootfs_nfs_url, extra_args=extra_args)) - if fdt_subpath is not None: - fh.write("FDT {}\n".format(fdt_subpath)) - fh.flush() - - @contextlib.contextmanager - def local_pxeboot_config(self, tftproot, macaddr, ip, tftp_port, - nfsroot_dir, device=None): - kernel_tftp_url = 'tftp://{}:{}/kernel'.format(ip, tftp_port) - rootfs_nfs_url = '{}:{}'.format(ip, nfsroot_dir) - pxe_cfg_filename = _normalise_macaddr(macaddr) - pxe_cfg_path = os.path.join(tftproot, 'pxelinux.cfg', pxe_cfg_filename) - os.makedirs(os.path.dirname(pxe_cfg_path)) - with open(pxe_cfg_path, 'w') as f: - self._write_pxe_config(fh=f, kernel_tftp_url=kernel_tftp_url, - rootfs_nfs_url=rootfs_nfs_url, - device=device, - extra_args=os.environ.get('KERNEL_ARGS','')) - - try: - with self._local_symlink( - src=pxe_cfg_filename, - dst=os.path.join(tftproot, - 'pxelinux.cfg', - '01-' + pxe_cfg_filename)): - 
yield - finally: - os.unlink(pxe_cfg_path) - - @contextlib.contextmanager - def remote_pxeboot_config(self, tftproot, kernel_tftproot, kernel_subpath, - fdt_subpath, rootfs_nfsroot, rootfs_subpath, - macaddr): - rootfs_nfs_url = '{}/{}'.format(rootfs_nfsroot, rootfs_subpath) - url = urlparse.urlsplit(kernel_tftproot) - kernel_tftp_url = '{}:{}'.format(url.netloc, kernel_subpath) - pxe_cfg_filename = _normalise_macaddr(macaddr) - url = urlparse.urlsplit(tftproot) - inst_cfg_path = os.path.join(url.path, 'pxelinux.cfg') - with tempfile.NamedTemporaryFile() as f: - self._write_pxe_config( - fh=f, kernel_tftp_url=kernel_tftp_url, - fdt_subpath=fdt_subpath, - rootfs_nfs_url=rootfs_nfs_url, - extra_args=os.environ.get('KERNEL_ARGS','')) - with self._remote_copy( - hostname=url.hostname, src=f.name, - dst=os.path.join(inst_cfg_path, - pxe_cfg_filename)), \ - self._remote_symlink( - hostname=url.hostname, - src=pxe_cfg_filename, - dst=os.path.join(inst_cfg_path, - '01-' + pxe_cfg_filename)): - yield - - @contextlib.contextmanager - def dhcp_server(self, interface, host_ip, target_ip, broadcast_ip): - with self._tempdir() as td: - leases_path = os.path.join(td, 'leases') - config_path = os.path.join(td, 'config') - stdout_path = os.path.join(td, 'stdout') - stderr_path = os.path.join(td, 'stderr') - pidfile_path = os.path.join(td, 'pid') - with open(config_path, 'w') as f: - f.write(textwrap.dedent('''\ - start {target_ip} - end {target_ip} - interface {interface} - max_leases 1 - lease_file {leases_path} - pidfile {pidfile_path} - boot_file pxelinux.0 - option dns {host_ip} - option broadcast {broadcast_ip} - ''').format(**locals())) - with open(stdout_path, 'w') as stdout, \ - open(stderr_path, 'w') as stderr: - sp = subprocess.Popen(['udhcpd', '-f', config_path], cwd=td, - stdin=open(os.devnull), stdout=stdout, - stderr=stderr) - try: - with executor(sp.pid): - yield - finally: - sp.terminate() - - def get_interface_ip(self, interface): - ip_addresses = [] - info = cliapp.runcmd(['ip', '-o', '-f', 'inet', - 'addr', 'show', interface]).rstrip('\n') - if info: - tokens = collections.deque(info.split()[1:]) - ifname = tokens.popleft() - while tokens: - tok = tokens.popleft() - if tok == 'inet': - address = tokens.popleft() - address, netmask = address.split('/') - ip_addresses.append(address) - elif tok == 'brd': - tokens.popleft() # not interested in broadcast address - elif tok == 'scope': - tokens.popleft() # not interested in scope tag - else: - continue - if not ip_addresses: - raise cliapp.AppException('Interface %s has no addresses' - % interface) - if len(ip_addresses) > 1: - warnings.warn('Interface %s has multiple addresses, ' - 'using first (%s)' % (interface, ip_addresses[0])) - return ip_addresses[0] - - def ipmi_set_target_vlan(self): - if any(env_var.startswith('IPMI_') for env_var in os.environ): - # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST and PXEBOOT_VLAN - default = textwrap.dedent('''\ - ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\ - lan set 1 vlan id "$PXEBOOT_VLAN" - ''') - else: - default = textwrap.dedent('''\ - while true; do - echo Please set the target\\'s vlan to $PXEBOOT_VLAN, \\ - then enter \\"vlanned\\" - read - if [ "$REPLY" = vlanned ]; then - break - fi - done - ''') - command = os.environ.get('PXEBOOT_SET_VLAN_COMMAND', default) - subprocess.check_call(['sh', '-euc', command, '-']) - - def ipmi_pxe_reboot_target(self): - if any(env_var.startswith('IPMI_') for env_var in os.environ): - # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST and PXEBOOT_VLAN - 
default = textwrap.dedent('''\ - set -- ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" - "$@" chassis bootdev pxe - "$@" chassis power reset - ''') - else: - default = textwrap.dedent('''\ - while true; do - echo Please reboot the target in PXE mode, then\\ - enter \\"pxe-booted\\" - read - if [ "$REPLY" = pxe-booted ]; then - break - fi - done - ''') - command = os.environ.get('PXEBOOT_PXE_REBOOT_COMMAND', default) - subprocess.check_call(['sh', '-euc', command, '-']) - - def wait_for_target_to_install(self): - command = os.environ.get( - 'PXEBOOT_WAIT_INSTALL_COMMAND', - textwrap.dedent('''\ - while true; do - echo Please wait for the system to install, then \\ - enter \\"installed\\" - read - if [ "$REPLY" = installed ]; then - break - fi - done - ''')) - subprocess.check_call(['sh', '-euc', command, '-']) - - def ipmi_unset_target_vlan(self): - if any(env_var.startswith('IPMI_') for env_var in os.environ): - # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST - default = textwrap.dedent('''\ - ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\ - lan set 1 vlan id off - ''') - else: - default = textwrap.dedent('''\ - while true; do - echo Please reset the target\\'s vlan, \\ - then enter \\"unvlanned\\" - read - if [ "$REPLY" = unvlanned ]; then - break - fi - done - ''') - command = os.environ.get('PXEBOOT_UNSET_VLAN_COMMAND', default) - subprocess.check_call(['sh', '-euc', command, '-']) - - def ipmi_reboot_target(self): - if any(env_var.startswith('IPMI_') for env_var in os.environ): - # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST - default = textwrap.dedent('''\ - ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\ - chassis power reset - ''') - else: - default = textwrap.dedent('''\ - while true; do - echo Please reboot the target, then\\ - enter \\"rebooted\\" - read - if [ "$REPLY" = rebooted ]; then - break - fi - done - ''') - command = os.environ.get('PXEBOOT_REBOOT_COMMAND', default) - subprocess.check_call(['sh', '-euc', command, '-']) - - def process_args(self, (temp_root, macaddr)): - interface = os.environ.get('PXEBOOT_DEPLOYER_INTERFACE', None) - target_interface = os.environ.get('PXEBOOT_TARGET_INTERFACE', None) - vlan = os.environ.get('PXEBOOT_VLAN') - if vlan is not None: vlan = int(vlan) - mode = os.environ.get('PXEBOOT_MODE') - if mode is None: - if interface: - if vlan is not None: - mode = 'spawn-vlan' - else: - if 'PXEBOOT_CONFIG_TFTP_ADDRESS' in os.environ: - mode = 'existing-dhcp' - else: - mode = 'spawn-novlan' - else: - mode = 'existing-server' - assert mode in ('spawn-vlan', 'spawn-novlan', 'existing-dhcp', - 'existing-server') - if mode == 'spawn-vlan': - with self.vlan(interface=interface, vlan=vlan) \ - as (host_ip, target_ip, broadcast_ip), \ - self.tftp_server(host_ip=host_ip, interface=interface) \ - as (tftp_port, tftproot), \ - self.local_pxelinux(tftproot=tftproot), \ - self.local_kernel(rootfs=temp_root, tftproot=tftproot), \ - self.local_nfsroot(rootfs=temp_root, target_ip=target_ip), \ - self.local_pxeboot_config(tftproot=tftproot, macaddr=macaddr, - device=target_interface, - ip=host_ip, tftp_port=tftp_port, - nfsroot_dir=temp_root), \ - self.dhcp_server(interface=interface, host_ip=host_ip, - target_ip=target_ip, - broadcast_ip=broadcast_ip): - self.ipmi_set_target_vlan() - self.ipmi_pxe_reboot_target() - self.wait_for_target_to_install() - self.ipmi_unset_target_vlan() - self.ipmi_reboot_target() - elif mode == 'spawn-novlan': - with self.static_ip(interface=interface) as (host_ip, target_ip, - broadcast_ip), \ - 
self.tftp_server(host_ip=host_ip, interface=interface, - tftp_port=69) \ - as (tftp_port, tftproot), \ - self.local_pxelinux(tftproot=tftproot), \ - self.local_kernel(rootfs=temp_root, tftproot=tftproot), \ - self.local_nfsroot(rootfs=temp_root, target_ip=target_ip), \ - self.local_pxeboot_config(tftproot=tftproot, macaddr=macaddr, - device=target_interface, - ip=host_ip, tftp_port=tftp_port, - nfsroot_dir=temp_root), \ - self.dhcp_server(interface=interface, host_ip=host_ip, - target_ip=target_ip, - broadcast_ip=broadcast_ip): - self.ipmi_pxe_reboot_target() - self.wait_for_target_to_install() - self.ipmi_reboot_target() - elif mode == 'existing-dhcp': - ip = self.get_interface_ip(interface) - config_tftpaddr = os.environ['PXEBOOT_CONFIG_TFTP_ADDRESS'] - with self.tftp_server(ip=ip, interface=interface, tftp_port=69) \ - as (tftp_port, tftproot), \ - self.local_kernel(rootfs=temp_root, tftproot=tftproot), \ - self.local_nfsroot(rootfs=temp_root, client_ip=''): - kernel_tftproot = 'tftp://{}:{}/'.format(ip, tftp_port) - rootfs_nfsroot = '{}:{}'.format(ip, temp_root) - with self.remote_pxeboot_config( - tftproot=config_tftpaddr, - kernel_tftproot=kernel_tftproot, - kernel_subpath='kernel', - rootfs_nfsroot=nfsroot, - rootfs_subpath='', - macaddr=macaddr): - self.ipmi_pxe_reboot_target() - self.wait_for_target_to_install() - self.ipmi_reboot_target() - elif mode == 'existing-server': - config_tftpaddr = os.environ[ 'PXEBOOT_CONFIG_TFTP_ADDRESS'] - kernel_tftpaddr = os.environ.get('PXEBOOT_KERNEL_TFTP_ADDRESS', - config_tftpaddr) - url = urlparse.urlsplit(kernel_tftpaddr) - kernel_tftproot = os.environ.get('PXEBOOT_KERNEL_TFTP_ROOT', - 'tftp://%s/%s' % (url.hostname, - url.path)) - rootfs_rsync = os.environ['PXEBOOT_ROOTFS_RSYNC_ADDRESS'] - url = urlparse.urlsplit(rootfs_rsync) - nfsroot = os.environ.get('PXEBOOT_ROOTFS_NFSROOT', - '%s:%s' % (url.hostname, url.path)) - with self.remote_kernel(rootfs=temp_root, tftp_url=kernel_tftpaddr, - macaddr=macaddr) as kernel_subpath, \ - self.remote_fdt(rootfs=temp_root, tftp_url=kernel_tftpaddr, - macaddr=macaddr) as fdt_subpath, \ - self.remote_nfsroot(rootfs=temp_root, rsync_url=rootfs_rsync, \ - macaddr=macaddr) as rootfs_subpath, \ - self.remote_pxeboot_config(tftproot=config_tftpaddr, - kernel_tftproot=kernel_tftproot, - kernel_subpath=kernel_subpath, - fdt_subpath=fdt_subpath, - rootfs_nfsroot=nfsroot, - rootfs_subpath=rootfs_subpath, - macaddr=macaddr): - persist = os.environ.get('PXE_INSTALLER') in ('no', 'False') - if not persist: - self.ipmi_pxe_reboot_target() - self.wait_for_target_to_install() - self.ipmi_reboot_target() - else: - cliapp.AppException('Invalid PXEBOOT_MODE: %s' % mode) - -PXEBoot().run() diff --git a/pxeboot.write.help b/pxeboot.write.help deleted file mode 100644 index 7cb78bce..00000000 --- a/pxeboot.write.help +++ /dev/null @@ -1,166 +0,0 @@ -help: > - pxeboot.write extension. - - - This write extension will serve your generated system over NFS to - the target system. - - In all modes `location` is the mac address of the interface that - the target will PXE boot from. This is used so that the target will - load the configuration file appropriate to it. - - - # `PXEBOOT_MODE` - - - It has 4 modes, which can be specified with PXEBOOT_MODE, or inferred - from which parameters are passed: - - - ## spawn-vlan - - - Specify PXEBOOT_DEPLOYER_INTERFACE and PXEBOOT_VLAN to configure - the target to pxeboot on a vlan and spawn a dhcp, nfs and tftp - server. 
This is potentially the fastest, since it doesn't need to - copy data to other servers. - - This will create a vlan interface for the interface specified in - PXEBOOT_DEPLOYER_INTERFACE and spawn a dhcp server which serves - pxelinux.0, a configuration file and a kernel image from itself. - - The configuration file informs the target to boot with a kernel - command-line that uses an NFS root served from the deployment host. - - - ## spawn-novlan - - - Specify PXEBOOT_DEPLOYER_INTERFACE without PXEBOOT_VLAN to configure - like `spawn-vlan`, but without creating the vlan interface. - - This assumes that you have exclusive access to the interface, such - as if you're plugged in to the device directly, or your interface - is vlanned by your infrastructure team. - - This is required if you are serving from a VM and bridging it to the - correct network via macvtap. For this to work, you need to macvtap - bridge to a pre-vlanned interface on your host machine. - - - ## existing-dhcp - - - Specify PXEBOOT_DEPLOYER_INTERFACE and PXEBOOT_CONFIG_TFTP_ADDRESS - to put config on an existing tftp server, already configured by the - dhcp server. - - This spawns a tftp server and configures the local nfs server, but - doesn't spawn a dhcp server. This is useful if you have already got a - dhcp server that serves PXE images. - - PXEBOOT_CONFIG_TFTP_ADDRESS is a URL in the form `sftp://$HOST/$PATH`. - The configuration file is copied to `$PATH/pxelinux.cfg/` on the - target identified by `$HOST`. - - - ## existing-server - - - Specify at least PXEBOOT_CONFIG_TFTP_ADDRESS and - PXEBOOT_ROOTFS_RSYNC_ADDRESS to specify existing servers to copy - config, kernels and the rootfs to. - - Configuration is copied to the target as `existing-dhcp`. - - Specify PXEBOOT_KERNEL_TFTP_ADDRESS if the tftp server that the - kernel must be downloaded from is different to that of the pxelinux - configuration file. - - PXEBOOT_ROOTFS_RSYNC_ADDRESS is a rsync URL describing where to copy - nfsroots to where they will be exported by the NFS server. - - Specify PXEBOOT_ROOTFS_NFSROOT if the nfsroot appears as a different - address from the target's perspective. - - - # IPMI commands - - - After the PXE boot has been set up, the target needs to be rebooted - in PXE mode. - - If the target is IPMI enabled, you can set `IPMI_USER`, `IPMI_HOST` - and `IPMI_PASSWORD` to make it reboot the target into netboot mode - automatically. - - If they are not specified, then instructions will be displayed, and - `pxeboot.write` will wait for you to finish. - - If there are command-line automation tools for rebooting the target - in netboot mode, then appropriate commands can be defined in the - following variables. - - - ## PXEBOOT_PXE_REBOOT_COMMAND - - - This command will be used to reboot the target device with its boot - device set to PXE boot. - - - ## PXEBOOT_REBOOT_COMMAND - - - This command will be used to reboot the target device in its default - boot mode. - - - ## PXEBOOT_WAIT_INSTALL_COMMAND - - - If it is possible for the target to notify you that it has finished - installing, you can put a command in here to wait for the event. - - - # Misc - - - ## KERNEL_ARGS - - - Additional kernel command line options. Note that the following - options - - root=/dev/nfs ip=dhcp nfsroot=$NFSROOT` - - are implicitly added by the extension. - - - ## DTB_PATH - - - Location in the deployed root filesystem of the Flattened Device - Tree blob (FDT) to use. 
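
    Putting the KERNEL_ARGS and DTB_PATH notes together: the file that
    pxeboot.write renders for the target (see _write_pxe_config above)
    comes out roughly like the sketch below. The MAC-derived filename,
    addresses, port and console= argument are made-up example values;
    only the DEFAULT/LABEL/LINUX/APPEND/FDT layout comes from the
    template, and the FDT line only appears when DTB_PATH is set.
    Written as a shell heredoc purely for illustration:

        cat > pxelinux.cfg/52-54-00-12-34-56 << 'EOF'
        DEFAULT default
        LABEL default
        LINUX tftp://192.168.1.1:69/kernel
        APPEND root=/dev/nfs ip=dhcp nfsroot=192.168.1.1:/srv/nfs/rootfs console=ttyS0
        FDT 52-54-00-12-34-56-fdt
        EOF

    pxeboot.write also drops a 01-52-54-00-12-34-56 symlink next to
    that file, since the mac-prefixed name is what pxelinux looks up
    for the booting interface.
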
- - - ## PXE_INSTALLER - - - If set to `no`, `False` or any other YAML value for false, the - remotely installed rootfs, kernel, bootloader config file and - device tree blob if specified, will not be removed after the - deployment finishes. This variable is only meanful on the - `existing-server` mode. - - - ## PXEBOOT_TARGET_INTERFACE - - Name of the interface of the target to pxeboot from. Some targets - with more than one interface try to get the rootfs from a different - interface than the interface from where the pxeboot server is - reachable. Using this variable, the kernel arguments will be filled - to include the device. - - Note that the name of this interface is the kernel's default name, - usually called ethX, and is non-determinisic. diff --git a/sdk.write b/sdk.write deleted file mode 100755 index 8d3d2a63..00000000 --- a/sdk.write +++ /dev/null @@ -1,284 +0,0 @@ -#!/bin/sh -# Copyright (C) 2014 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# =*= License: GPL-2 =*= - -set -eu - -die(){ - echo "$@" >&2 - exit 1 -} - -shellescape(){ - echo "'$(echo "$1" | sed -e "s/'/'\\''/g")'" -} - -########################## END OF COMMON HEADER ############################### -# -# The above lines, as well as being part of this script, are copied into the -# self-installing SDK blob's header script, as a means of re-using content. -# - -help(){ - cat <>"$OUTPUT_SCRIPT" <>"$OUTPUT_SCRIPT" <<'EOF' -########################### START OF HEADER SCRIPT ############################ - -usage(){ - cat <&2 - usage >&2 - exit 1 -fi - -TOOLCHAIN_PATH="$(readlink -f \"$1\")" - -sedescape(){ - # Escape the passed in string so it can be safely interpolated into - # a sed expression as a literal value. - echo "$1" | sed -e 's/[\/&]/\\&/g' -} - -prepend_to_path_elements(){ - # Prepend $1 to every entry in the : separated list specified as $2. - local prefix="$1" - ( - # Split path into components - IFS=: - set -- $2 - # Print path back out with new prefix - printf %s "$prefix/$1" - shift - for arg in "$@"; do - printf ":%s" "$prefix/$arg" - done - ) -} - -extract_rootfs(){ - # Extract the bzipped tarball at the end of the script passed as $1 - # to the path specified as $2 - local selfextractor="$1" - local target="$2" - local script_end="$(($(\ - grep -aEn -m1 -e '^#+ END OF HEADER SCRIPT #+$' "$selfextractor" | - cut -d: -f1) + 1 ))" - mkdir -p "$target" - tail -n +"$script_end" "$selfextractor" | tar -xj -C "$target" . 
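    # The grep above finds the marker line that separates this installer's
    # header script from the payload appended after it; "$script_end" is
    # therefore the first line of the bzip2-compressed rootfs tarball, and
    # the tail streams it straight into tar for extraction under "$target".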
-} - -amend_text_file_paths(){ - # Replace all instances of $3 with $4 in the directory specified by $1 - # excluding the subdirectory $2 - local root="$1" - local inner_sysroot="$2" - local old_prefix="$3" - local new_prefix="$4" - find "$root" \( -path "$inner_sysroot" -prune \) -o -type f \ - -exec sh -c 'file "$1" | grep -q text' - {} \; \ - -exec sed -i -e \ - "s/$(sedescape "$old_prefix")/$(sedescape "$new_prefix")/g" {} + -} - -filter_patchelf_errors(){ - # Filter out warnings from patchelf that are acceptable - # The warning that it's making a file bigger is just noise - # The warning about not being an ELF executable just means we got a - # false positive from file that it was an ELF binary - # Failing to find .interp is because for convenience, we set the - # interpreter in the same command as setting the rpath, even though - # we give it both executables and libraries. - grep -v -e 'warning: working around a Linux kernel bug' \ - -e 'not an ELF executable' \ - -e 'cannot find section .interp' -} - -patch_elves(){ - # Set the interpreter and library paths of ELF binaries in $1, - # except for the $2 subdirectory, using the patchelf command in the - # toolchain specified as $3, so that it uses the linker specified - # as $4 as the interpreter, and the runtime path specified by $5. - # - # The patchelf inside the toolchain is used to ensure that it works - # independently of the availability of patchelf on the host. - # - # This is possible by invoking the linker directly and specifying - # --linker-path as the RPATH we want to set the binaries to use. - local root="$1" - local inner_sysroot="$2" - local patchelf="$3" - local linker="$4" - local lib_path="$5" - find "$root" \( -path "$inner_sysroot" -prune \) -o -type f \ - -type f -perm +u=x \ - -exec sh -c 'file "$1" | grep -q "ELF"' - {} \; \ - -exec "$linker" --library-path "$lib_path" \ - "$patchelf" --set-interpreter "$linker" \ - --set-rpath "$lib_path" {} \; 2>&1 \ - | filter_patchelf_errors -} - -generate_environment_setup(){ - local target="$1" - install -m 644 -D /dev/stdin "$target" <>"$OUTPUT_SCRIPT" . diff --git a/strip-gplv3.configure b/strip-gplv3.configure deleted file mode 100755 index c08061ad..00000000 --- a/strip-gplv3.configure +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2013 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -''' A Morph configuration extension for removing gplv3 chunks from a system - -Using a hard-coded list of chunks, it will read the system's /baserock metadata -to find the files created by that chunk, then remove them. 
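(Editorial illustration, inferred from the code below rather than present in
the original docstring: each /baserock/<chunk>-<cache-key>.meta file is JSON
containing a "contents" list of the paths that chunk installed. Entries that
match the chunk's keep-pattern, such as the shared-library pattern given for
gcc and libtool, are left in place; every other listed file, symlink or empty
directory belonging to that chunk is removed from the target root.)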
- -''' - -import cliapp -import re -import os -import json - -class StripGPLv3ConfigureExtension(cliapp.Application): - gplv3_chunks = [ - ['autoconf', ''], - ['automake', ''], - ['bash', ''], - ['binutils', ''], - ['bison', ''], - ['ccache', ''], - ['cmake', ''], - ['flex', ''], - ['gawk', ''], - ['gcc', r'^.*lib.*\.so(\.\d+)*$'], - ['gdbm', ''], - ['gettext', ''], - ['gperf', ''], - ['groff', ''], - ['libtool', r'^.*lib.*\.so(\.\d+)*$'], - ['m4', ''], - ['make', ''], - ['nano', ''], - ['patch', ''], - ['rsync', ''], - ['texinfo-tarball', ''], - ] - - def process_args(self, args): - target_root = args[0] - meta_dir = os.path.join(target_root, 'baserock') - - for chunk in self.gplv3_chunks: - regex = os.path.join(meta_dir, "%s-[^-]\+\.meta" % chunk[0]) - artifacts = self.runcmd(['find', meta_dir, '-regex', regex]) - - for artifact in artifacts.split(): - self.remove_chunk(target_root, artifact, chunk[1]) - - os.symlink(os.path.join(os.sep, 'bin', 'busybox'), - os.path.join(target_root, 'usr', 'bin', 'awk')) - - def remove_chunk(self, target_root, chunk, pattern): - chunk_meta_path = os.path.join(target_root, 'baserock', chunk) - - with open(chunk_meta_path, 'r') as f: - chunk_meta_data = json.load(f) - - if not 'contents' in chunk_meta_data: - raise cliapp.AppError('Chunk %s does not have a "contents" list' - % chunk) - updated_contents = [] - for content_entry in reversed(chunk_meta_data['contents']): - pat = re.compile(pattern) - if len(pattern) == 0 or not pat.match(content_entry): - self.remove_content_entry(target_root, content_entry) - else: - updated_contents.append(content_entry) - - def remove_content_entry(self, target_root, content_entry): - entry_path = os.path.join(target_root, './' + content_entry) - if not entry_path.startswith(target_root): - raise cliapp.AppException('%s is not in %s' - % (entry_path, target_root)) - if os.path.exists(entry_path): - if os.path.islink(entry_path): - os.unlink(entry_path) - elif os.path.isfile(entry_path): - os.remove(entry_path) - elif os.path.isdir(entry_path): - if not os.listdir(entry_path): - os.rmdir(entry_path) - else: - raise cliapp.AppException('%s is not a link, file or directory' - % entry_path) -StripGPLv3ConfigureExtension().run() diff --git a/swift-build-rings.yml b/swift-build-rings.yml deleted file mode 100644 index 1ffe9c37..00000000 --- a/swift-build-rings.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- hosts: localhost - vars: - - rings: - - { name: account, port: 6002 } - - { name: container, port: 6001 } - - { name: object, port: 6000 } - remote_user: root - tasks: - - file: path={{ ansible_env.ROOT }}/etc/swift owner=root group=root state=directory - - - name: Create ring - shell: swift-ring-builder {{ item.name }}.builder create {{ ansible_env.SWIFT_PART_POWER }} - {{ ansible_env.SWIFT_REPLICAS }} {{ ansible_env.SWIFT_MIN_PART_HOURS }} - with_items: rings - - - name: Add each storage node to the ring - shell: swift-ring-builder {{ item[0].name }}.builder - add r1z1-{{ item[1].ip }}:{{ item[0].port }}/{{ item[1].device }} {{ item[1].weight }} - with_nested: - - rings - - ansible_env.SWIFT_STORAGE_DEVICES - - - name: Rebalance the ring - shell: swift-ring-builder {{ item.name }}.builder rebalance {{ ansible_env.SWIFT_REBALANCE_SEED }} - with_items: rings - - - name: Copy ring configuration files into place - copy: src={{ item.name }}.ring.gz dest={{ ansible_env.ROOT }}/etc/swift - with_items: rings - - - name: Copy ring builder files into place - copy: src={{ item.name }}.builder dest={{ ansible_env.ROOT }}/etc/swift - 
with_items: rings diff --git a/swift-storage-devices-validate.py b/swift-storage-devices-validate.py deleted file mode 100755 index 57ab23d0..00000000 --- a/swift-storage-devices-validate.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright © 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . -# -# This is used by the openstack-swift.configure extension -# to validate any provided storage device specifiers -# under SWIFT_STORAGE_DEVICES -# - - -''' - This is used by the swift-storage.configure extension - to validate any storage device specifiers specified - in the SWIFT_STORAGE_DEVICES environment variable -''' - -from __future__ import print_function - -import yaml -import sys - -EXAMPLE_DEVSPEC = '{device: sdb1, ip: 127.0.0.1, weight: 100}' -REQUIRED_KEYS = ['ip', 'device', 'weight'] - -def err(msg): - print(msg, file=sys.stderr) - sys.exit(1) - -if len(sys.argv) != 2: - err('usage: %s STRING_TO_BE_VALIDATED' % sys.argv[0]) - -swift_storage_devices = yaml.load(sys.argv[1]) - -if not isinstance(swift_storage_devices, list): - err('Expected list of device specifiers\n' - 'Example: [%s]' % EXAMPLE_DEVSPEC) - -for d in swift_storage_devices: - if not isinstance(d, dict): - err("Invalid device specifier: `%s'\n" - 'Device specifier must be a dictionary\n' - 'Example: %s' % (d, EXAMPLE_DEVSPEC)) - - if set(d.keys()) != set(REQUIRED_KEYS): - err("Invalid device specifier: `%s'\n" - 'Specifier should contain: %s\n' - 'Example: %s' % (d, str(REQUIRED_KEYS)[1:-1], EXAMPLE_DEVSPEC)) diff --git a/swift-storage.configure b/swift-storage.configure deleted file mode 100644 index 391b392a..00000000 --- a/swift-storage.configure +++ /dev/null @@ -1,107 +0,0 @@ -#!/bin/bash -# -# Copyright © 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - -set -e - -# The ansible script needs to know where the rootfs is, so we export it here -export ROOT="$1" - -validate_number() { - local name="$1" - local value="$2" - - local pattern='^[0-9]+$' - if ! 
[[ $value =~ $pattern ]] - then - echo "'$name' must be a number" >&2 - exit 1 - fi -} - -validate_non_empty() { - local name="$1" - local value="$2" - - if [[ $value = None ]] - then - echo "'$name' cannot be empty" >&2 - exit 1 - fi -} - -MANDATORY_OPTIONS="SWIFT_HASH_PATH_PREFIX \ - SWIFT_HASH_PATH_SUFFIX \ - SWIFT_REBALANCE_SEED \ - SWIFT_PART_POWER \ - SWIFT_REPLICAS \ - SWIFT_MIN_PART_HOURS \ - SWIFT_STORAGE_DEVICES \ - CONTROLLER_HOST_ADDRESS \ - MANAGEMENT_INTERFACE_IP_ADDRESS" - -for option in $MANDATORY_OPTIONS -do - if ! [[ -v $option ]] - then - missing_option=True - echo "Required option $option isn't set!" >&2 - fi -done - -if [[ $missing_option = True ]]; then exit 1; fi - -./swift-storage-devices-validate.py "$SWIFT_STORAGE_DEVICES" - -# Validate SWIFT_PART_POWER, SWIFT_REPLICAS, SWIFT_MIN_PART_HOURS -# just make sure they're numbers - -validate_number "SWIFT_PART_POWER" "$SWIFT_PART_POWER" -validate_number "SWIFT_REPLICAS" "$SWIFT_REPLICAS" -validate_number "SWIFT_MIN_PART_HOURS" "$SWIFT_MIN_PART_HOURS" - -# Make sure these aren't empty -validate_non_empty "SWIFT_HASH_PATH_PREFIX" "$SWIFT_HASH_PATH_PREFIX" -validate_non_empty "SWIFT_HASH_PATH_SUFFIX" "$SWIFT_HASH_PATH_SUFFIX" -validate_non_empty "SWIFT_REBALANCE_SEED" "$SWIFT_REBALANCE_SEED" -validate_non_empty "CONTROLLER_HOST_ADDRESS" "$CONTROLLER_HOST_ADDRESS" -validate_non_empty "MANAGEMENT_INTERFACE_IP_ADDRESS" "$MANAGEMENT_INTERFACE_IP_ADDRESS" - -mkdir -p "$ROOT/usr/lib/systemd/system/multi-user.target.wants" # ensure this exists before we make symlinks - -# A swift controller needs the storage setup service -# but does not want any of the other storage services enabled -ln -s "/usr/lib/systemd/system/swift-storage-setup.service" \ - "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-storage-setup.service" - -SWIFT_CONTROLLER=${SWIFT_CONTROLLER:-False} - -if [[ $SWIFT_CONTROLLER = False ]] -then - ln -s "/usr/lib/systemd/system/rsync.service" \ - "$ROOT/usr/lib/systemd/system/multi-user.target.wants/rsync.service" - ln -s "/usr/lib/systemd/system/swift-storage.service" \ - "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-storage.service" -fi - -# Build swift data structures (the rings) -/usr/bin/ansible-playbook -i hosts swift-build-rings.yml - -cat << EOF > "$ROOT"/usr/share/swift/swift-storage-vars.yml ---- -MANAGEMENT_INTERFACE_IP_ADDRESS: $MANAGEMENT_INTERFACE_IP_ADDRESS -SWIFT_HASH_PATH_PREFIX: $SWIFT_HASH_PATH_PREFIX -SWIFT_HASH_PATH_SUFFIX: $SWIFT_HASH_PATH_SUFFIX -EOF diff --git a/trove.configure b/trove.configure deleted file mode 100755 index f823762c..00000000 --- a/trove.configure +++ /dev/null @@ -1,148 +0,0 @@ -#!/bin/sh -# -# Copyright (C) 2013 - 2014 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# This is a "morph deploy" configuration extension to fully configure -# a Trove instance at deployment time. 
It uses the following variables
-# from the environment (run `morph help trove.configure` to see a description
-# of them):
-#
-# * TROVE_ID
-# * TROVE_HOSTNAME (optional, defaults to TROVE_ID)
-# * TROVE_COMPANY
-# * LORRY_SSH_KEY
-# * UPSTREAM_TROVE
-# * UPSTREAM_TROVE_PROTOCOL
-# * TROVE_ADMIN_USER
-# * TROVE_ADMIN_EMAIL
-# * TROVE_ADMIN_NAME
-# * TROVE_ADMIN_SSH_PUBKEY
-# * LORRY_CONTROLLER_MINIONS (optional, defaults to 4)
-# * TROVE_BACKUP_KEYS - a space-separated list of paths to SSH keys.
-#   (optional)
-# * TROVE_GENERIC (optional)
-#
-# The configuration of a Trove is slightly tricky: part of it has to
-# be run on the configured system after it has booted. We accomplish
-# this by copying in all the relevant data to the target system
-# (in /var/lib/trove-setup), and creating a systemd unit file that
-# runs on the first boot. The first boot will be detected by the
-# existence of the /var/lib/trove-setup/needed file.
-
-set -e
-
-if [ "$TROVE_GENERIC" ]
-then
-    echo "Not configuring the trove, it will be generic"
-    exit 0
-fi
-
-
-# Check that all the variables needed are present:
-
-error_vars=false
-if test "x$TROVE_ID" = "x"; then
-    echo "ERROR: TROVE_ID needs to be defined."
-    error_vars=true
-fi
-
-if test "x$TROVE_COMPANY" = "x"; then
-    echo "ERROR: TROVE_COMPANY needs to be defined."
-    error_vars=true
-fi
-
-if test "x$TROVE_ADMIN_USER" = "x"; then
-    echo "ERROR: TROVE_ADMIN_USER needs to be defined."
-    error_vars=true
-fi
-
-if test "x$TROVE_ADMIN_NAME" = "x"; then
-    echo "ERROR: TROVE_ADMIN_NAME needs to be defined."
-    error_vars=true
-fi
-
-if test "x$TROVE_ADMIN_EMAIL" = "x"; then
-    echo "ERROR: TROVE_ADMIN_EMAIL needs to be defined."
-    error_vars=true
-fi
-
-if ! ssh-keygen -lf $LORRY_SSH_KEY > /dev/null 2>&1
-then
-    echo "ERROR: LORRY_SSH_KEY is not a valid ssh key."
-    error_vars=true
-fi
-
-if ! ssh-keygen -lf $WORKER_SSH_PUBKEY > /dev/null 2>&1
-then
-    echo "ERROR: WORKER_SSH_PUBKEY is not a valid ssh key."
-    error_vars=true
-fi
-
-if ! ssh-keygen -lf $TROVE_ADMIN_SSH_PUBKEY > /dev/null 2>&1
-then
-    echo "ERROR: TROVE_ADMIN_SSH_PUBKEY is not a valid ssh key."
- error_vars=true -fi - -if "$error_vars"; then - exit 1 -fi - -ROOT="$1" - - -TROVE_DATA="$ROOT/etc/trove" -mkdir -p "$TROVE_DATA" - -install -m 0600 "$LORRY_SSH_KEY" "$TROVE_DATA/lorry.key" -install -m 0644 "${LORRY_SSH_KEY}.pub" "$TROVE_DATA/lorry.key.pub" -install -m 0644 "$TROVE_ADMIN_SSH_PUBKEY" "$TROVE_DATA/admin.key.pub" -install -m 0644 "$WORKER_SSH_PUBKEY" "$TROVE_DATA/worker.key.pub" - - -python <<'EOF' >"$TROVE_DATA/trove.conf" -import os, sys, yaml - -trove_configuration={ - 'TROVE_ID': os.environ['TROVE_ID'], - 'TROVE_COMPANY': os.environ['TROVE_COMPANY'], - 'TROVE_ADMIN_USER': os.environ['TROVE_ADMIN_USER'], - 'TROVE_ADMIN_EMAIL': os.environ['TROVE_ADMIN_EMAIL'], - 'TROVE_ADMIN_NAME': os.environ['TROVE_ADMIN_NAME'], - 'LORRY_SSH_KEY': '/etc/trove/lorry.key', - 'LORRY_SSH_PUBKEY': '/etc/trove/lorry.key.pub', - 'TROVE_ADMIN_SSH_PUBKEY': '/etc/trove/admin.key.pub', - 'WORKER_SSH_PUBKEY': '/etc/trove/worker.key.pub', -} - - - -optional_keys = ('MASON_ID', 'HOSTNAME', 'TROVE_HOSTNAME', - 'LORRY_CONTROLLER_MINIONS', 'TROVE_BACKUP_KEYS', - 'UPSTREAM_TROVE', 'UPSTREAM_TROVE_PROTOCOL') - -for key in optional_keys: - if key in os.environ: - trove_configuration[key]=os.environ[key] - -yaml.dump(trove_configuration, sys.stdout, default_flow_style=False) -EOF - -if [ -n "$TROVE_BACKUP_KEYS" ]; then - mkdir -p "$TROVE_DATA/backup-keys" - cp -- $TROVE_BACKUP_KEYS "$TROVE_DATA/backup-keys" - echo "TROVE_BACKUP_KEYS: /etc/trove/backup-keys/*" >> "$TROVE_DATA/trove.conf" -fi diff --git a/trove.configure.help b/trove.configure.help deleted file mode 100644 index c96bdf74..00000000 --- a/trove.configure.help +++ /dev/null @@ -1,126 +0,0 @@ -help: | - This is a "morph deploy" configuration extension to fully configure - a Trove instance at deployment time. It uses the following - configuration variables: - - * `TROVE_ID` - * `TROVE_HOSTNAME` (optional, defaults to `TROVE_ID`) - * `TROVE_COMPANY` - * `LORRY_SSH_KEY` - * `UPSTREAM_TROVE` - * `TROVE_ADMIN_USER` - * `TROVE_ADMIN_EMAIL` - * `TROVE_ADMIN_NAME` - * `TROVE_ADMIN_SSH_PUBKEY` - * `LORRY_CONTROLLER_MINIONS` (optional, defaults to 4) - * `TROVE_BACKUP_KEYS` - a space-separated list of paths to SSH keys. - (optional) - - The variables are described in more detail below. - - A Trove deployment needs to know the following things: - - * The Trove's ID and public name. - * The Trove's administrator name and access details. - * Private and public SSH keys for the Lorry user on the Trove. - * Which upstream Trove it should be set to mirror upon initial deploy. - - These are specified with the configuration variables described in this - help. - - * `TROVE_GENERIC` -- boolean. If it's true the trove will be generic - and it won't be configured with any of the other variables listed - here. - - * `TROVE_ID` -- the identifier of the Trove. This separates it from - other Troves, and allows mirroring of Troves to happen without local - changes getting overwritten. - - The Trove ID is used in several ways. Any local repositories (those not - mirrored from elsewhere) get created under a prefix that is the ID. - Thus, the local repositories on the `git.baserock.org` Trove, whose - Trove ID is `baserock`, are named - `baserock/baserock/definitions.git` and similar. The ID is used - there twice: first as a prefix and then as a "project name" within - that prefix. There can be more projects under the prefix. For - example, there is a `baserock/local-config/lorries.git` repository, - where `local-config` is a separate project from `baserock`. 
Projects
-    here are a concept for the Trove's git access control language.
-
-    The Trove ID is also used as the prefix for any branch and tag names
-    created locally for repositories that are not local. Thus, in the
-    `delta/linux.git` repository, any local branches would be called
-    something like `baserock/morph`, instead of just `morph`. The
-    Trove's git access control prevents normal users from pushing
-    branches and tags that do not have the Trove ID as the prefix.
-
-    * `TROVE_HOSTNAME` -- the public name of the Trove. This is an
-      optional setting, and defaults to `TROVE_ID`. The public name is
-      typically the domain name of the server (e.g., `git.baserock.org`),
-      but can also be an IP address. This setting is used when Trove needs
-      to generate URLs that point to itself, such as the `git://` and
-      `http://` URLs for each git repository that is viewed via the web
-      interface.
-
-      Note that this is _not_ the system hostname. That is set separately,
-      with the `HOSTNAME` configuration setting (see the
-      `set-hostname.configure` extension).
-
-    * `TROVE_COMPANY` -- a description of the organisation who own the
-      Trove. This is shown in various parts of the web interface of the
-      Trove. It is for descriptive purposes only.
-
-    * `LORRY_SSH_KEY` -- ssh key pair that the Trove's Lorry will use to
-      access an upstream Trove, and to push updates to the Trove's git
-      server.
-
-      The value is a filename on the system doing the deployment (where
-      `morph deploy` is run). The file contains the _private_ key, and the
-      public key is in a file with the `.pub` suffix added to the name.
-
-      The upstream Trove needs to be configured to allow this key to
-      access it. This configuration does not do that automatically.
-
-    * `UPSTREAM_TROVE` -- public name of the upstream Trove (domain
-      name or IP address). This is an optional setting. If it's set,
-      the new Trove will be configured to mirror that Trove.
-
-    * `TROVE_ADMIN_USER`, `TROVE_ADMIN_EMAIL`, `TROVE_ADMIN_NAME`,
-      `TROVE_ADMIN_SSH_PUBKEY` -- details of the Trove's (initial)
-      administrator.
-
-      Each Trove needs at least one administrator user, and one is created
-      upon initial deployment. `TROVE_ADMIN_USER` is the username of the
-      account to be created, `TROVE_ADMIN_EMAIL` should be the e-mail of
-      the user, and `TROVE_ADMIN_NAME` is their name. If more
-      administrators are needed, the initial person should create them
-      using the usual Gitano commands.
-
-    * `LORRY_CONTROLLER_MINIONS` -- the number of Lorry Controller worker
-      processes to start. This is an optional setting and defaults to 4.
-      The more workers are running, the more Lorry jobs can run at the same
-      time, but the more resources they require.
-
-    * `TROVE_BACKUP_KEYS` -- a space-separated list of paths to SSH keys.
-      If this is set, the Trove will have a backup user that can be accessed
-      with rsync using the SSH keys provided.
-
-    Example
-    -------
-
-    The following set of variables could be used to deploy a Trove instance:
-
-        TROVE_ID: my-trove
-        TROVE_HOSTNAME: my-trove.example.com
-        TROVE_COMPANY: My Personal Trove for Me, Myself and I
-        LORRY_SSH_KEY: my-trove/lorry.key
-        UPSTREAM_TROVE: git.baserock.org
-        UPSTREAM_TROVE_USER: my-trove
-        UPSTREAM_TROVE_EMAIL: my-trove@example.com
-        TROVE_ADMIN_USER: tomjon
-        TROVE_ADMIN_EMAIL: tomjon@example.com
-        TROVE_ADMIN_NAME: Tomjon of Lancre
-        TROVE_ADMIN_SSH_PUBKEY: my-trove/tomjon.key.pub
-
-    These would be put into the cluster morphology used to do the
-    deployment.
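(Editorial sketch, not part of the original help: the exact ssh-keygen options
are an assumption, and the path simply matches the example above.) A key pair
in the layout expected by `LORRY_SSH_KEY`, that is a private key file plus a
`.pub` file next to it, could be prepared on the deployment host with
something like:

    mkdir -p my-trove
    ssh-keygen -t rsa -N '' -C 'lorry@my-trove' -f my-trove/lorry.key
    # creates my-trove/lorry.key (private) and my-trove/lorry.key.pub (public)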
diff --git a/vagrant.configure b/vagrant.configure
deleted file mode 100644
index abc3ea0c..00000000
--- a/vagrant.configure
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/sh
-# Copyright (C) 2014 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-set -e
-
-ROOT="$1"
-
-if test "x$VAGRANT" = "x"; then
-    exit 0
-fi
-
-for needed in etc/ssh/sshd_config etc/sudoers; do
-    if ! test -e "$ROOT/$needed"; then
-        echo >&2 "Unable to find $needed"
-        echo >&2 "Cannot continue configuring as Vagrant basebox"
-        exit 1
-    fi
-done
-
-# SSH daemon needs to be configured to not use DNS...
-sed -i -e's/^\(.*[Uu][Ss][Ee][Dd][Nn][Ss].*\)$/#\1/' "$ROOT/etc/ssh/sshd_config"
-echo "UseDNS no" >> "$ROOT/etc/ssh/sshd_config"
-
-# We need to add a vagrant user with "vagrant" as the password. We're doing
-# this manually because chrooting in to run adduser is not really allowed at
-# deployment time, since we would not necessarily be able to run adduser
-# there. In practice, for now, we could, because we cannot deploy raw disks
-# cross-platform and expect extlinux to install, but we won't, for good
-# practice and to highlight this deficiency.
-echo 'vagrant:x:1000:1000:Vagrant User:/home/vagrant:/bin/bash' >> "$ROOT/etc/passwd"
-echo 'vagrant:/6PTOoWylhw3w:16198:0:99999:7:::' >> "$ROOT/etc/shadow"
-echo 'vagrant:x:1000:' >> "$ROOT/etc/group"
-mkdir -p "$ROOT/home/vagrant"
-chown -R 1000:1000 "$ROOT/home/vagrant"
-
-# Next, the vagrant user is meant to have sudo access
-echo 'vagrant ALL=(ALL) NOPASSWD: ALL' >> "$ROOT/etc/sudoers"
-
-# And ensure that we get sbin in our path
-echo 'PATH="$PATH:/sbin:/usr/sbin"' >> "$ROOT/etc/profile"
-echo 'export PATH' >> "$ROOT/etc/profile"
-
--
cgit v1.2.1
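(Editorial note on the vagrant.configure extension above, illustrative only:
the hard-coded /etc/shadow field is a traditional crypt(3) hash of the
password "vagrant", as the comment in the script states. An equivalent hash
could be produced on the deployment host, for example with Python's crypt
module; the two-character salt shown here is arbitrary.)

    python -c 'import crypt; print(crypt.crypt("vagrant", "ab"))'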